/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

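/* Per-fragment reassembly state, overlaid on skb->cb: the generic
 * inet6 control block plus this fragment's byte offset within the
 * original datagram.
 */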
struct ip6frag_skb_cb {
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue {
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

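/* Accessors for the per-namespace queue count and memory usage,
 * used by the statistics reporting code (e.g. /proc/net/sockstat6).
 */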
int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock as doing so could race with ipfrag_hash_rnd being
 * recalculated.  The result is masked with INETFRAGS_HASHSZ - 1,
 * which assumes the hash table size is a power of two.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

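/* Hash callback for the shared inet_frag layer; the random seed
 * ip6_frags.rnd is periodically recalculated (see secret_interval
 * below) to frustrate hash-collision attacks.
 */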
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

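/* Lookup key comparison: two fragments belong to the same queue only
 * if the fragment ID, the defrag user and both addresses match.
 */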
int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
			ipv6_addr_equal(&fq->saddr, arg->src) &&
			ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

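/* Timer callback: a reassembly queue has timed out.  Count the failure
 * and, if the first fragment was seen, send an ICMPv6 "fragment
 * reassembly time exceeded" error back to the sender.
 */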
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send an error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use the device on which the LAST segment arrived as the
	 * source.  Do not use the fq->dev pointer directly; the device
	 * might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

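/* Find the reassembly queue for (id, src, dst), creating one if it
 * does not exist yet.  Returns NULL on allocation failure or hash
 * chain overflow; otherwise the queue is returned with a reference
 * held, which the caller drops via fq_put().
 */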
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}

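/* Queue one fragment.  Validates the fragment's offset and length
 * against RFC 2460, links it into the offset-sorted fragment list,
 * and kicks off reassembly once the datagram is complete.  Any overlap
 * kills the whole queue, per RFC 5722.
 */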
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

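	/* The fragment's payload runs from just past the fragment header
	 * to the end of the packet, so its last byte sits at
	 * offset + (payload_len minus the extension headers that precede
	 * the data).
	 */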
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC 2460 says always send a parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front of and behind us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC 5722, Section 4, amended by Errata ID 3089:
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

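	/* First and last fragments have been seen and the accumulated
	 * byte count (meat) matches the datagram length: every hole is
	 * filled, so reassemble.
	 */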
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure for any reason, and 1 on success, in which
 *	case the skb we were handed has become the head of the reassembled
 *	frame and IP6CB(head)->nhoff points at the current nexthdr field.
 *
 *	It is called with a locked fq, and the caller must check that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* The unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with the data and paged part
	 * and the second holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

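/* Receive handler for IPPROTO_FRAGMENT.  Returns 1 when the skb now
 * holds a complete datagram (or was not really fragmented), so that
 * the input path resubmits it for further header processing, and -1
 * when the fragment was queued or dropped.
 */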
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* A jumbo payload inhibits the fragment header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

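	/* 0xFFF9 covers the 13-bit fragment offset plus the M (more
	 * fragments) flag; if both are zero this is an "atomic" fragment
	 * carrying the whole datagram, so no reassembly is needed.
	 */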
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol = {
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
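/* Per-namespace tunables: the reassembly memory high/low watermarks
 * (in bytes) and the queue timeout, stored in jiffies but exposed in
 * seconds via proc_dointvec_jiffies.
 */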
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
			ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

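/* Module init: hook IPPROTO_FRAGMENT, register the sysctls and the
 * per-namespace state, then plug our callbacks into the shared
 * inet_frag machinery.  Errors unwind in reverse order.
 */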
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}