// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

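/* TCP FIN and RST packets tear down the flow so that the connection
 * falls back to the classic netfilter path.
 */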
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

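/* Fix up the TCP checksum after an IPv4 address rewrite (the address is
 * part of the pseudo-header).
 */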
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

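/* Same for UDP: a zero UDP checksum means "no checksum" on IPv4, so it is
 * only updated when set, and a computed zero is stored as CSUM_MANGLED_0.
 */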
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

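/* Dispatch the layer 4 checksum fixup for an IPv4 address rewrite. */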
static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

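/* Apply source NAT to the IPv4 header: rewrite the source address in the
 * original direction and the destination address in the reply direction,
 * then fix up the IP and layer 4 checksums.
 */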
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

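/* Apply destination NAT to the IPv4 header, the mirror image of
 * nf_flow_snat_ip().
 */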
static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

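/* Perform SNAT/DNAT on addresses and ports as flagged on the flow. */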
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

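/* The fast path only handles IPv4 headers without options. */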
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

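/* Record the VLAN and/or PPPoE encapsulation of the packet in the
 * lookup tuple.
 */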
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

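/* Parse an IPv4 packet into a flow table lookup tuple. Returns -1 if the
 * packet cannot take the fast path: fragments, IP options, unsupported
 * layer 4 protocol, TTL about to expire or truncated headers.
 */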
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	/* pskb_may_pull() may have reallocated the skb head, reload. */
	iph = (struct iphdr *)(skb_network_header(skb) + offset);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

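/* Revalidate the cached route; only the neighbour and xfrm transmit
 * paths hold a dst entry.
 */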
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

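/* Pass the packet to dst_output() so that it goes through the xfrm
 * transformation before transmission.
 */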
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

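/* Check whether a VLAN or PPPoE header encapsulates @proto and add the
 * encapsulation length to @offset.
 */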
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

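/* Strip the encapsulation headers recorded in the tuple before the
 * packet is forwarded.
 */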
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

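/* Transmit to a known device, rebuilding the Ethernet header from the
 * addresses cached in the tuple.
 */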
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

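/* IPv4 fast path: if the packet matches an entry in the flow table,
 * apply NAT, decrement the TTL and transmit it directly, bypassing the
 * classic forwarding path.
 */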
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

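/* Fix up the TCP checksum after an IPv6 address rewrite. */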
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

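/* Same for UDP, remapping a resulting zero checksum to CSUM_MANGLED_0. */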
static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

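/* Dispatch the layer 4 checksum fixup for an IPv6 address rewrite. */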
static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

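/* Apply source NAT to the IPv6 header; IPv6 has no header checksum, so
 * only the layer 4 checksum needs fixing up.
 */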
static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

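/* Apply destination NAT to the IPv6 header, the mirror image of
 * nf_flow_snat_ipv6().
 */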
static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

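/* Perform IPv6 SNAT/DNAT on addresses and ports as flagged on the flow. */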
static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

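/* Parse an IPv6 packet into a flow table lookup tuple. Extension headers
 * are not supported and send the packet back to the classic path.
 */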
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	/* pskb_may_pull() may have reallocated the skb head, reload. */
	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

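/* IPv6 counterpart of nf_flow_offload_ip_hook(): on a flow table match,
 * apply NAT, decrement the hop limit and transmit the packet directly.
 */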
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);