1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/kernel.h>
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/seqlock.h>
6 #include <linux/netlink.h>
7 #include <linux/netfilter.h>
8 #include <linux/netfilter/nf_tables.h>
9 #include <net/netfilter/nf_tables.h>
10 #include <net/dst_metadata.h>
11 #include <net/ip_tunnels.h>
12 #include <net/vxlan.h>
13 #include <net/erspan.h>
14 #include <net/geneve.h>
15
/* Per-rule state for the "tunnel" expression (tunnel metadata matching). */
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;	/* which tunnel datum to fetch (PATH/ID) */
	u8			dreg;	/* destination register index */
	enum nft_tunnel_mode	mode:8;	/* direction filter: NONE/RX/TX */
	u8			len;	/* bytes written into the register */
};
22
/*
 * Evaluate the tunnel expression against a packet.
 *
 * NFT_TUNNEL_PATH stores a boolean into the destination register: true
 * when tunnel metadata is attached to the skb and its direction matches
 * priv->mode.  NFT_TUNNEL_ID stores the 32-bit tunnel key (host byte
 * order) when the same direction filter matches, otherwise the rule is
 * broken out of with NFT_BREAK.
 */
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			/* No tunnel metadata: the path check is false. */
			nft_reg_store8(dest, false);
			return;
		}
		/* MODE_NONE matches either direction; MODE_RX matches only
		 * received (non-TX) metadata; MODE_TX only transmit metadata.
		 */
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			/* Matching the tunnel id needs tunnel metadata. */
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}
67
/* Netlink attribute policy for the tunnel expression. */
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= { .type = NLA_U32 },
};
73
/*
 * Parse the NFTA_TUNNEL_* attributes for the tunnel expression.
 *
 * NFTA_TUNNEL_KEY and NFTA_TUNNEL_DREG are mandatory; NFTA_TUNNEL_MODE
 * is optional and defaults to NFT_TUNNEL_MODE_NONE (match either
 * direction).  The destination register is validated for the key's
 * store length (1 byte for PATH, 4 bytes for ID).
 */
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);	/* boolean result */
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);	/* 32-bit tunnel key */
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	priv->len = len;
	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}
109
nft_tunnel_get_dump(struct sk_buff * skb,const struct nft_expr * expr)110 static int nft_tunnel_get_dump(struct sk_buff *skb,
111 const struct nft_expr *expr)
112 {
113 const struct nft_tunnel *priv = nft_expr_priv(expr);
114
115 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
116 goto nla_put_failure;
117 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
118 goto nla_put_failure;
119 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
120 goto nla_put_failure;
121 return 0;
122
123 nla_put_failure:
124 return -1;
125 }
126
/*
 * Register-tracking hook: returns true when this expression is
 * redundant because an identical tunnel expression (same key, dreg and
 * mode) already loaded this register and no bitwise operation has
 * clobbered it since; otherwise records this expression as the
 * register's new selector and returns false.
 */
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	const struct nft_tunnel *tunnel;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->key != tunnel->key ||
	    priv->dreg != tunnel->dreg ||
	    priv->mode != tunnel->mode) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return false;
}
151
static struct nft_expr_type nft_tunnel_type;
/* Expression ops for the single "get" variant of the tunnel expression. */
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
	.reduce		= nft_tunnel_get_reduce,
};

/* Registered only for the netdev family (ingress/egress hooks). */
static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};
170
/* Encapsulation-specific tunnel options carried by a tunnel object. */
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];	/* raw geneve options */
	} u;
	u32	len;	/* bytes of the union in use */
	__be16	flags;	/* TUNNEL_{VXLAN,ERSPAN,GENEVE}_OPT discriminator */
};

/* Stateful tunnel object: a preallocated metadata dst plus its options. */
struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};
185
/* Policy for the IPv4 endpoint nest (NFTA_TUNNEL_KEY_IP). */
static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
};
190
/*
 * Parse the IPv4 endpoint nest into @info.  The destination address is
 * mandatory; the source address is optional.
 */
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}
213
/* Policy for the IPv6 endpoint nest (NFTA_TUNNEL_KEY_IP6). */
static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};
219
/*
 * Parse the IPv6 endpoint nest into @info and mark the tunnel as IPv6.
 * The destination address is mandatory; source address and flow label
 * are optional.
 */
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}
252
/* Policy for the vxlan option nest (NFTA_TUNNEL_KEY_OPTS_VXLAN). */
static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};
256
nft_tunnel_obj_vxlan_init(const struct nlattr * attr,struct nft_tunnel_opts * opts)257 static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
258 struct nft_tunnel_opts *opts)
259 {
260 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
261 int err;
262
263 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
264 nft_tunnel_opts_vxlan_policy, NULL);
265 if (err < 0)
266 return err;
267
268 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
269 return -EINVAL;
270
271 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
272
273 opts->len = sizeof(struct vxlan_metadata);
274 opts->flags = TUNNEL_VXLAN_OPT;
275
276 return 0;
277 }
278
/* Policy for the erspan option nest (NFTA_TUNNEL_KEY_OPTS_ERSPAN). */
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};
285
/*
 * Parse the erspan option nest into @opts.  Version 1 requires the
 * index attribute; version 2 requires direction and hwid.  Any other
 * version is rejected.
 */
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		/* Index is kept in network byte order; the dump path
		 * writes it back verbatim with nla_put_be32().
		 */
		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	opts->flags	= TUNNEL_ERSPAN_OPT;

	return 0;
}
332
/* Policy for one geneve option nest (NFTA_TUNNEL_KEY_OPTS_GENEVE). */
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};
338
nft_tunnel_obj_geneve_init(const struct nlattr * attr,struct nft_tunnel_opts * opts)339 static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
340 struct nft_tunnel_opts *opts)
341 {
342 struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
343 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
344 int err, data_len;
345
346 err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
347 nft_tunnel_opts_geneve_policy, NULL);
348 if (err < 0)
349 return err;
350
351 if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
352 !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
353 !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
354 return -EINVAL;
355
356 attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
357 data_len = nla_len(attr);
358 if (data_len % 4)
359 return -EINVAL;
360
361 opts->len += sizeof(*opt) + data_len;
362 if (opts->len > IP_TUNNEL_OPTS_MAX)
363 return -EINVAL;
364
365 memcpy(opt->opt_data, nla_data(attr), data_len);
366 opt->length = data_len / 4;
367 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
368 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
369 opts->flags = TUNNEL_GENEVE_OPT;
370
371 return 0;
372 }
373
/* Policy for the option container nest; strict validation starts at
 * the geneve type, which was added after the deprecated-nest era.
 */
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};
381
/*
 * Parse the NFTA_TUNNEL_KEY_OPTS container.  Exactly one encapsulation
 * type may be used: vxlan and erspan allow a single nest each, while
 * multiple geneve nests may appear (each appends another option).
 * Mixing types returns -EINVAL; unknown types return -EOPNOTSUPP.
 */
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	int err, rem, type = 0;	/* type tracks the first nest seen */
	struct nlattr *nla;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_VXLAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_ERSPAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			/* Repeated geneve nests are allowed; they append. */
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_GENEVE_OPT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}
428
/* Top-level policy for the tunnel object's NFTA_TUNNEL_KEY_* attributes. */
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};
440
/*
 * Create a tunnel object: build an ip_tunnel_info from the netlink
 * attributes, then allocate a metadata dst that packets will be
 * attached to on eval.
 *
 * Mandatory: NFTA_TUNNEL_KEY_ID plus exactly one of IP or IP6.
 * Defaults: tun_flags = KEY|CSUM|NOCACHE, ttl = U8_MAX.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;	/* object is for transmit */
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags	= TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	/* Exactly one address family must be given. */
	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		/* Map the NFT_TUNNEL_F_* ABI bits onto ip_tunnel flags. */
		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	/* Must run before metadata_dst_alloc(): it sets priv->opts.len,
	 * which sizes the trailing option area of the metadata dst.
	 */
	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}
523
nft_tunnel_obj_eval(struct nft_object * obj,struct nft_regs * regs,const struct nft_pktinfo * pkt)524 static inline void nft_tunnel_obj_eval(struct nft_object *obj,
525 struct nft_regs *regs,
526 const struct nft_pktinfo *pkt)
527 {
528 struct nft_tunnel_obj *priv = nft_obj_data(obj);
529 struct sk_buff *skb = pkt->skb;
530
531 skb_dst_drop(skb);
532 dst_hold((struct dst_entry *) priv->md);
533 skb_dst_set(skb, (struct dst_entry *) priv->md);
534 }
535
/*
 * Dump the tunnel endpoints into an NFTA_TUNNEL_KEY_IP6 nest (when the
 * info is IPv6) or an NFTA_TUNNEL_KEY_IP nest (IPv4).  Returns 0 on
 * success, -1 on nlmsg overrun (partial nest is cancelled).
 */
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}
574
nft_tunnel_opts_dump(struct sk_buff * skb,struct nft_tunnel_obj * priv)575 static int nft_tunnel_opts_dump(struct sk_buff *skb,
576 struct nft_tunnel_obj *priv)
577 {
578 struct nft_tunnel_opts *opts = &priv->opts;
579 struct nlattr *nest, *inner;
580
581 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
582 if (!nest)
583 return -1;
584
585 if (opts->flags & TUNNEL_VXLAN_OPT) {
586 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
587 if (!inner)
588 goto failure;
589 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
590 htonl(opts->u.vxlan.gbp)))
591 goto inner_failure;
592 nla_nest_end(skb, inner);
593 } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
594 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
595 if (!inner)
596 goto failure;
597 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
598 htonl(opts->u.erspan.version)))
599 goto inner_failure;
600 switch (opts->u.erspan.version) {
601 case ERSPAN_VERSION:
602 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
603 opts->u.erspan.u.index))
604 goto inner_failure;
605 break;
606 case ERSPAN_VERSION2:
607 if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
608 get_hwid(&opts->u.erspan.u.md2)) ||
609 nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
610 opts->u.erspan.u.md2.dir))
611 goto inner_failure;
612 break;
613 }
614 nla_nest_end(skb, inner);
615 } else if (opts->flags & TUNNEL_GENEVE_OPT) {
616 struct geneve_opt *opt;
617 int offset = 0;
618
619 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
620 if (!inner)
621 goto failure;
622 while (opts->len > offset) {
623 opt = (struct geneve_opt *)opts->u.data + offset;
624 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
625 opt->opt_class) ||
626 nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
627 opt->type) ||
628 nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
629 opt->length * 4, opt->opt_data))
630 goto inner_failure;
631 offset += sizeof(*opt) + opt->length * 4;
632 }
633 nla_nest_end(skb, inner);
634 }
635 nla_nest_end(skb, nest);
636 return 0;
637
638 inner_failure:
639 nla_nest_cancel(skb, inner);
640 failure:
641 nla_nest_cancel(skb, nest);
642 return -1;
643 }
644
nft_tunnel_ports_dump(struct sk_buff * skb,struct ip_tunnel_info * info)645 static int nft_tunnel_ports_dump(struct sk_buff *skb,
646 struct ip_tunnel_info *info)
647 {
648 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
649 nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
650 return -1;
651
652 return 0;
653 }
654
nft_tunnel_flags_dump(struct sk_buff * skb,struct ip_tunnel_info * info)655 static int nft_tunnel_flags_dump(struct sk_buff *skb,
656 struct ip_tunnel_info *info)
657 {
658 u32 flags = 0;
659
660 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
661 flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
662 if (!(info->key.tun_flags & TUNNEL_CSUM))
663 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
664 if (info->key.tun_flags & TUNNEL_SEQ)
665 flags |= NFT_TUNNEL_F_SEQ_NUMBER;
666
667 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
668 return -1;
669
670 return 0;
671 }
672
/*
 * Dump the whole tunnel object (id, endpoints, ports, flags, tos, ttl
 * and encapsulation options) back to userspace.
 */
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}
694
/* Release the metadata dst allocated in nft_tunnel_obj_init(). */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}
702
static struct nft_object_type nft_tunnel_obj_type;
/* Object ops for the stateful NFT_OBJECT_TUNNEL type. */
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};
720
nft_tunnel_module_init(void)721 static int __init nft_tunnel_module_init(void)
722 {
723 int err;
724
725 err = nft_register_expr(&nft_tunnel_type);
726 if (err < 0)
727 return err;
728
729 err = nft_register_obj(&nft_tunnel_obj_type);
730 if (err < 0)
731 nft_unregister_expr(&nft_tunnel_type);
732
733 return err;
734 }
735
/* Unregister in reverse order of registration. */
static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}
741
module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");
750