/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<pedro_m@yahoo.com>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

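/* Reassembly memory is capped by sysctl_ip6frag_high_thresh; once it
 * is exceeded the evictor frees the oldest queues until usage drops
 * below sysctl_ip6frag_low_thresh.
 */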
int sysctl_ip6frag_high_thresh = 256*1024;
int sysctl_ip6frag_low_thresh = 192*1024;

int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct frag_queue	*next;
	struct list_head	lru_list;	/* lru list member	*/

	__u32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	spinlock_t		lock;
	atomic_t		refcnt;
	struct timer_list	timer;		/* expire timer		*/
	struct sk_buff		*fragments;
	int			len;
	int			meat;
	int			iif;
	struct timeval		stamp;
	unsigned int		csum;
	__u8			last_in;	/* has first/last segment arrived? */
#define COMPLETE		4
#define FIRST_IN		2
#define LAST_IN			1
	__u16			nhoffset;
	struct frag_queue	**pprev;
};

/* Hash table. */

#define IP6Q_HASHSZ	64

static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED;
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
int ip6_frag_nqueues = 0;

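/* Unlink fq from its hash chain and from the LRU list.  The caller
 * must hold ip6_frag_lock for writing.
 */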
static __inline__ void __fq_unlink(struct frag_queue *fq)
{
	if (fq->next)
		fq->next->pprev = fq->pprev;
	*fq->pprev = fq->next;
	list_del(&fq->lru_list);
	ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
	write_lock(&ip6_frag_lock);
	__fq_unlink(fq);
	write_unlock(&ip6_frag_lock);
}

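/* Hash (id, saddr, daddr) into a chain index using the Jenkins mix,
 * perturbed by the periodically refreshed ip6_frag_hash_rnd secret.
 */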
static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = saddr->s6_addr32[0];
	b = saddr->s6_addr32[1];
	c = saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frag_hash_rnd;
	__jhash_mix(a, b, c);

	a += saddr->s6_addr32[3];
	b += daddr->s6_addr32[0];
	c += daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += daddr->s6_addr32[2];
	b += daddr->s6_addr32[3];
	c += id;
	__jhash_mix(a, b, c);

	return c & (IP6Q_HASHSZ - 1);
}

static struct timer_list ip6_frag_secret_timer;
static int ip6_frag_secret_interval = 10 * 60 * HZ;

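/* Timer callback: pick a fresh hash secret and move every queue to
 * its new chain, so chain placement cannot be predicted by remote
 * hosts.
 */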
static void ip6_frag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ip6_frag_lock);
	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
	for (i = 0; i < IP6Q_HASHSZ; i++) {
		struct frag_queue *q;

		q = ip6_frag_hash[i];
		while (q) {
			struct frag_queue *next = q->next;
			unsigned int hval = ip6qhashfn(q->id,
						       &q->saddr,
						       &q->daddr);

			if (hval != i) {
				/* Unlink. */
				if (q->next)
					q->next->pprev = q->pprev;
				*q->pprev = q->next;

				/* Relink to new hash chain. */
				if ((q->next = ip6_frag_hash[hval]) != NULL)
					q->next->pprev = &q->next;
				ip6_frag_hash[hval] = q;
				q->pprev = &ip6_frag_hash[hval];
			}

			q = next;
		}
	}
	write_unlock(&ip6_frag_lock);

	mod_timer(&ip6_frag_secret_timer, now + ip6_frag_secret_interval);
}

atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip6_frag_mem);
	kfree_skb(skb);
}

static inline void frag_free_queue(struct frag_queue *fq, int *work)
{
	if (work)
		*work -= sizeof(struct frag_queue);
	atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
	kfree(fq);
}

static inline struct frag_queue *frag_alloc_queue(void)
{
	struct frag_queue *fq = kmalloc(sizeof(struct frag_queue), GFP_ATOMIC);

	if (!fq)
		return NULL;
	atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
	return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(fq->last_in&COMPLETE);
	BUG_TRAP(del_timer(&fq->timer) == 0);

	/* Release all fragment data. */
	fp = fq->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	frag_free_queue(fq, work);
}

static __inline__ void fq_put(struct frag_queue *fq, int *work)
{
	if (atomic_dec_and_test(&fq->refcnt))
		ip6_frag_destroy(fq, work);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}

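/* Memory pressure evictor: kill the least-recently used incomplete
 * queues until reassembly memory drops below the low threshold.
 */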
static void ip6_evictor(void)
{
	struct frag_queue *fq;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ip6_frag_lock);
		if (list_empty(&ip6_frag_lru_list)) {
			read_unlock(&ip6_frag_lock);
			return;
		}
		tmp = ip6_frag_lru_list.next;
		fq = list_entry(tmp, struct frag_queue, lru_list);
		atomic_inc(&fq->refcnt);
		read_unlock(&ip6_frag_lock);

		spin_lock(&fq->lock);
		if (!(fq->last_in&COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->lock);

		fq_put(fq, &work);
		IP6_INC_STATS_BH(Ip6ReasmFails);
	}
}

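/* Per-queue expire timer: the reassembly timeout has elapsed, so kill
 * the queue and, if the first fragment arrived, report a "fragment
 * reassembly time exceeded" ICMPv6 error to the sender.
 */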
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq = (struct frag_queue *) data;

	spin_lock(&fq->lock);

	if (fq->last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	IP6_INC_STATS_BH(Ip6ReasmTimeout);
	IP6_INC_STATS_BH(Ip6ReasmFails);

	/* Send error only if the first segment arrived. */
	if (fq->last_in&FIRST_IN && fq->fragments) {
		struct net_device *dev = dev_get_by_index(fq->iif);

		/* Use the device on which the LAST ARRIVED segment was
		 * received as the source.  Do not use the fq->dev pointer
		 * directly; the device might have disappeared already.
		 */
		if (dev) {
			fq->fragments->dev = dev;
			icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0,
				    dev);
			dev_put(dev);
		}
	}
out:
	spin_unlock(&fq->lock);
	fq_put(fq, NULL);
}

/* Creation primitives. */

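/* Insert a freshly allocated queue into the hash table and the LRU
 * list.  On SMP, recheck the chain under the write lock: another CPU
 * may have interned an identical queue while we were allocating, in
 * which case the duplicate is dropped and the existing queue returned.
 */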
static struct frag_queue *ip6_frag_intern(unsigned int hash,
					  struct frag_queue *fq_in)
{
	struct frag_queue *fq;

	write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
	for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
		if (fq->id == fq_in->id &&
		    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
		    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			write_unlock(&ip6_frag_lock);
			fq_in->last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
		atomic_inc(&fq->refcnt);

	atomic_inc(&fq->refcnt);
	if ((fq->next = ip6_frag_hash[hash]) != NULL)
		fq->next->pprev = &fq->next;
	ip6_frag_hash[hash] = fq;
	fq->pprev = &ip6_frag_hash[hash];
	INIT_LIST_HEAD(&fq->lru_list);
	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
	ip6_frag_nqueues++;
	write_unlock(&ip6_frag_lock);
	return fq;
}

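/* Allocate and initialize a new reassembly queue for (id, src, dst),
 * then hand it to ip6_frag_intern() for insertion into the hash table.
 */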
static struct frag_queue *
ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct frag_queue *fq;

	if ((fq = frag_alloc_queue()) == NULL)
		goto oom;

	memset(fq, 0, sizeof(struct frag_queue));

	fq->id = id;
	ipv6_addr_copy(&fq->saddr, src);
	ipv6_addr_copy(&fq->daddr, dst);

	/* init_timer has been done by the memset */
	fq->timer.function = ip6_frag_expire;
	fq->timer.data = (long) fq;
	fq->lock = SPIN_LOCK_UNLOCKED;
	atomic_set(&fq->refcnt, 1);

	return ip6_frag_intern(hash, fq);

oom:
	IP6_INC_STATS_BH(Ip6ReasmFails);
	return NULL;
}

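/* Find the reassembly queue matching (id, src, dst) and take a
 * reference on it, creating a new queue if none exists yet.
 */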
static __inline__ struct frag_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct frag_queue *fq;
	unsigned int hash = ip6qhashfn(id, src, dst);

	read_lock(&ip6_frag_lock);
	for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
		if (fq->id == id &&
		    !ipv6_addr_cmp(src, &fq->saddr) &&
		    !ipv6_addr_cmp(dst, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			read_unlock(&ip6_frag_lock);
			return fq;
		}
	}
	read_unlock(&ip6_frag_lock);

	return ip6_frag_create(hash, id, src, dst);
}

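/* Queue one fragment: validate its offset and length, trim any
 * overlap with already queued fragments, and link the skb into the
 * fragment list sorted by offset.
 */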
static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->last_in & COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

	if ((unsigned int)end >= 65536) {
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
		return;
	}

	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(0x0001))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->len ||
		    ((fq->last_in & LAST_IN) && end != fq->len))
			goto err;
		fq->last_in |= LAST_IN;
		fq->len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return;
		}
		if (end > fq->len) {
			/* Some bits beyond end -> corruption. */
			if (fq->last_in & LAST_IN)
				goto err;
			fq->len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;
	if (end-offset < skb->len) {
		if (pskb_trim(skb, end - offset))
			goto err;
		if (skb->ip_summed != CHECKSUM_UNNECESSARY)
			skb->ip_summed = CHECKSUM_NONE;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->fragments = next;

			fq->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->fragments = skb;

	if (skb->dev)
		fq->iif = skb->dev->ifindex;
	skb->dev = NULL;
	fq->stamp = skb->stamp;
	fq->meat += skb->len;
	atomic_add(skb->truesize, &ip6_frag_mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->last_in |= FIRST_IN;
	}
	write_lock(&ip6_frag_lock);
	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
	write_unlock(&ip6_frag_lock);
	return;

err:
	kfree_skb(skb);
}

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and the offset of the
 *	nexthdr field in the reassembled frame on success.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->fragments;
	int    remove_fraghdr = 0;
	int    payload_len;
	int    nhoff;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len;
	nhoff = head->h.raw - head->nh.raw;

	if (payload_len > 65535) {
		payload_len -= 8;
		if (payload_len > 65535)
			goto out_oversize;
		remove_fraghdr = 1;
	}

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with the data and paged part
	 * and the second holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip6_frag_mem);
	}

	/* Normally we do not remove the fragment header from the
	 * datagram, but we have to remove it and relocate the header
	 * when the payload is > 65535-8. */
	if (remove_fraghdr) {
		nhoff = fq->nhoffset;
		head->nh.raw[nhoff] = head->h.raw[0];
		memmove(head->head+8, head->head, (head->data-head->head)-8);
		head->mac.raw += 8;
		head->nh.raw += 8;
	} else {
		((struct frag_hdr*)head->h.raw)->frag_off = 0;
	}

	skb_shinfo(head)->frag_list = head->next;
	head->h.raw = head->data;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &ip6_frag_mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip6_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->stamp = fq->stamp;
	head->nh.ipv6h->payload_len = htons(payload_len);

	*skb_in = head;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_HW)
		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);

	IP6_INC_STATS_BH(Ip6ReasmOKs);
	fq->fragments = NULL;
	return nhoff;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	IP6_INC_STATS_BH(Ip6ReasmFails);
	return -1;
}

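/* Fragment header input handler.  For a non-fragmented frame the
 * header is simply skipped; otherwise the fragment is queued and, once
 * the first and last fragments have arrived and all the bytes are
 * present, the datagram is reassembled in place and the nexthdr offset
 * returned.  Returns -1 while the datagram is still incomplete or on
 * error.
 */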
int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
{
	struct sk_buff *skb = *skbp;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;

	hdr = skb->nh.ipv6h;

	IP6_INC_STATS_BH(Ip6ReasmReqds);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
		return -1;
	}
	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
		return -1;
	}

	hdr = skb->nh.ipv6h;
	fhdr = (struct frag_hdr *)skb->h.raw;

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->h.raw += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(Ip6ReasmOKs);

		return (u8*)fhdr - skb->nh.raw;
	}

	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
		ip6_evictor();

	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
		int ret = -1;

		spin_lock(&fq->lock);

		ip6_frag_queue(fq, skb, fhdr, nhoff);

		if (fq->last_in == (FIRST_IN|LAST_IN) &&
		    fq->meat == fq->len)
			ret = ip6_frag_reasm(fq, skbp, dev);

		spin_unlock(&fq->lock);
		fq_put(fq, NULL);
		return ret;
	}

	IP6_INC_STATS_BH(Ip6ReasmFails);
	kfree_skb(skb);
	return -1;
}

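/* Module init: seed the hash secret from boot-time entropy and start
 * the timer that periodically rebuilds the hash table with a new
 * secret.
 */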
void __init ipv6_frag_init(void)
{
	ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	init_timer(&ip6_frag_secret_timer);
	ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
	ip6_frag_secret_timer.expires = jiffies + ip6_frag_secret_interval;
	add_timer(&ip6_frag_secret_timer);
}