// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
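
/* The fast path does not track TCP connection teardown: on FIN or RST,
 * tear down the flow so the packet (and the rest of the connection)
 * falls back to the classic forwarding path where conntrack can follow
 * the shutdown.
 */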
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}
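
/* The TCP and UDP checksums cover a pseudo-header that includes the
 * source and destination addresses, so an address rewrite must also
 * patch the layer 4 checksum. inet_proto_csum_replace4() does this
 * incrementally, without re-summing the payload.
 */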
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}
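
/* UDP over IPv4 may carry a zero (disabled) checksum, in which case
 * there is nothing to fix up. If the updated checksum folds to zero,
 * store it as CSUM_MANGLED_0 (0xffff) so it is not mistaken for
 * "no checksum".
 */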
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}
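
/* SNAT in the original direction rewrites the source address to the
 * reply tuple's destination; in the reply direction it rewrites the
 * destination address back. The IPv4 header checksum is fixed up
 * incrementally with csum_replace4() before the layer 4 fixup.
 */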
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
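
/* DNAT is the mirror image: rewrite the destination address in the
 * original direction and the source address in the reply direction.
 */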
static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}
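
/* A transport header offset other than sizeof(struct iphdr) (20 bytes)
 * means the IPv4 header carries options, which the fast path does not
 * parse; such packets take the classic path instead.
 */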
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}
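
/* Record up to two layers of encapsulation in the lookup tuple: the
 * hardware-accelerated VLAN tag, if present, followed by an inner
 * 802.1Q or PPPoE header, so flows match on the same encapsulation
 * they were offloaded with.
 */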
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}
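
/* Per-packet parsing state shared by the lookup helpers: the ingress
 * device, the offset from the network header to the IP header (non-zero
 * when VLAN/PPPoE encapsulation precedes it), and the expected layer 4
 * header size.
 */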
struct nf_flowtable_ctx {
	const struct net_device *in;
	u32 offset;
	u32 hdrsize;
};
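
/* Extract the IPv4 flow tuple from the packet. Fragments, packets with
 * IP options, unsupported layer 4 protocols, expiring TTLs and GRE
 * versions other than 0 are rejected so they take the classic path.
 * Note the iph pointer is reloaded after the second pskb_may_pull(),
 * which may reallocate the header.
 */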
static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += ctx->offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;
	tuple->iifidx = ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
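
/* Only the neighbour and xfrm transmit paths hold a cached route;
 * dst_check() detects a stale entry (e.g. after a routing change) so
 * the flow can be torn down and re-evaluated on the classic path.
 */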
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}
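
/* Strip the encapsulation headers recorded in the tuple before the
 * packet goes out: clear the hardware-accelerated VLAN tag, or pull
 * the inner 802.1Q/PPPoE header and reset the network header offset
 * accordingly.
 */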
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}
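
/* FLOW_OFFLOAD_XMIT_DIRECT: the egress device and both MAC addresses
 * were resolved when the flow was offloaded, so the Ethernet header
 * can be rebuilt and the packet queued straight to the device without
 * a route or neighbour lookup.
 */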
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
		       struct nf_flowtable *flow_table, struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}
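
/* Forward a packet on the fast path. Returns 1 if the packet has been
 * mangled and is ready for transmission, 0 to fall back to the classic
 * path (oversized packet, TCP teardown, stale route), and -1 to drop.
 * The headers are made writable before NAT mangles them, since the skb
 * may be cloned.
 */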
static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
				   struct nf_flowtable *flow_table,
				   struct flow_offload_tuple_rhash *tuplehash,
				   struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct iphdr *iph;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4) + ctx->offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= ctx->offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}
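
/* Netfilter hook for the IPv4 flowtable fast path: look up the flow,
 * mangle the packet (NAT, TTL, encapsulation) and transmit it via the
 * cached route, the resolved neighbour or the xfrm output path.
 * NF_ACCEPT hands the packet back to the classic forwarding path.
 */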
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in = state->in,
	};
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	__be32 nexthop;
	int ret;

	tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
	if (!tuplehash)
		return NF_ACCEPT;

	ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
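
/* The IPv6 half mirrors the IPv4 helpers above. IPv6 has no header
 * checksum, so only the layer 4 checksum (which covers a pseudo-header
 * with both addresses) needs fixing after an address rewrite, here via
 * inet_proto_csum_replace16() for the 128-bit addresses.
 */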
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
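
/* The transport header sits at a fixed sizeof(struct ipv6hdr) offset
 * here: packets carrying IPv6 extension headers never reach this point,
 * since nf_flow_tuple_ipv6() only accepts TCP, UDP and GRE as the
 * immediate next header.
 */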
static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}
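
/* IPv6 counterpart of nf_flow_tuple_ip(). No separate fragment or
 * option checks are needed: a fragment or extension header shows up as
 * an unsupported nexthdr and is rejected by the switch below, and
 * hop_limit takes the place of the TTL check.
 */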
static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + ctx->offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;
	tuple->iifidx = ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}
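
/* IPv6 counterpart of nf_flow_offload_forward(): same fast-path checks
 * and return contract, but the hop limit is decremented in place since
 * IPv6 has no header checksum to fix up.
 */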
static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
					struct nf_flowtable *flow_table,
					struct flow_offload_tuple_rhash *tuplehash,
					struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct ipv6hdr *ip6h;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
	thoff = sizeof(*ip6h) + ctx->offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx,
			    struct nf_flowtable *flow_table,
			    struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in = state->in,
	};
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rt6_info *rt;
	int ret;

	tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);