1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/kmemcheck.h>
43 #include <linux/mm.h>
44 #include <linux/interrupt.h>
45 #include <linux/in.h>
46 #include <linux/inet.h>
47 #include <linux/slab.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 
61 #include <net/protocol.h>
62 #include <net/dst.h>
63 #include <net/sock.h>
64 #include <net/checksum.h>
65 #include <net/xfrm.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 #include <trace/events/skb.h>
70 
71 #include "kmap_skb.h"
72 
73 static struct kmem_cache *skbuff_head_cache __read_mostly;
74 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
75 
76 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
77 				  struct pipe_buffer *buf)
78 {
79 	put_page(buf->page);
80 }
81 
82 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
83 				struct pipe_buffer *buf)
84 {
85 	get_page(buf->page);
86 }
87 
88 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
89 			       struct pipe_buffer *buf)
90 {
91 	return 1;
92 }
93 
94 
95 /* Pipe buffer operations for a socket. */
96 static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 	.can_merge = 0,
98 	.map = generic_pipe_buf_map,
99 	.unmap = generic_pipe_buf_unmap,
100 	.confirm = generic_pipe_buf_confirm,
101 	.release = sock_pipe_buf_release,
102 	.steal = sock_pipe_buf_steal,
103 	.get = sock_pipe_buf_get,
104 };
105 
106 /*
107  *	Keep out-of-line to prevent kernel bloat.
108  *	__builtin_return_address is not used because it is not always
109  *	reliable.
110  */
111 
112 /**
113  *	skb_over_panic	- 	private function
114  *	@skb: buffer
115  *	@sz: size
116  *	@here: address
117  *
118  *	Out of line support code for skb_put(). Not user callable.
119  */
120 static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121 {
122 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
124 	       here, skb->len, sz, skb->head, skb->data,
125 	       (unsigned long)skb->tail, (unsigned long)skb->end,
126 	       skb->dev ? skb->dev->name : "<NULL>");
127 	BUG();
128 }
129 
130 /**
131  *	skb_under_panic	- 	private function
132  *	@skb: buffer
133  *	@sz: size
134  *	@here: address
135  *
136  *	Out of line support code for skb_push(). Not user callable.
137  */
138 
139 static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
140 {
141 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
142 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
143 	       here, skb->len, sz, skb->head, skb->data,
144 	       (unsigned long)skb->tail, (unsigned long)skb->end,
145 	       skb->dev ? skb->dev->name : "<NULL>");
146 	BUG();
147 }
148 
149 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
150  *	'private' fields and also do memory statistics to find all the
151  *	[BEEP] leaks.
152  *
153  */
154 
155 /**
156  *	__alloc_skb	-	allocate a network buffer
157  *	@size: size to allocate
158  *	@gfp_mask: allocation mask
159  *	@fclone: allocate from fclone cache instead of head cache
160  *		and allocate a cloned (child) skb
161  *	@node: numa node to allocate memory on
162  *
163  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
164  *	tail room of at least @size bytes. The object has a reference count of
165  *	one. The return value is the buffer. On failure the return is %NULL.
166  *
167  *	Buffers may only be allocated from interrupts using a @gfp_mask of
168  *	%GFP_ATOMIC.
169  */
170 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
171 			    int fclone, int node)
172 {
173 	struct kmem_cache *cache;
174 	struct skb_shared_info *shinfo;
175 	struct sk_buff *skb;
176 	u8 *data;
177 
178 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
179 
180 	/* Get the HEAD */
181 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
182 	if (!skb)
183 		goto out;
184 	prefetchw(skb);
185 
186 	size = SKB_DATA_ALIGN(size);
187 	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
188 			gfp_mask, node);
189 	if (!data)
190 		goto nodata;
191 	prefetchw(data + size);
192 
193 	/*
194 	 * Only clear those fields we need to clear, not those that we will
195 	 * actually initialise below. Hence, don't put any more fields after
196 	 * the tail pointer in struct sk_buff!
197 	 */
198 	memset(skb, 0, offsetof(struct sk_buff, tail));
199 	skb->truesize = size + sizeof(struct sk_buff);
200 	atomic_set(&skb->users, 1);
201 	skb->head = data;
202 	skb->data = data;
203 	skb_reset_tail_pointer(skb);
204 	skb->end = skb->tail + size;
205 #ifdef NET_SKBUFF_DATA_USES_OFFSET
206 	skb->mac_header = ~0U;
207 #endif
208 
209 	/* make sure we initialize shinfo sequentially */
210 	shinfo = skb_shinfo(skb);
211 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
212 	atomic_set(&shinfo->dataref, 1);
213 	kmemcheck_annotate_variable(shinfo->destructor_arg);
214 
215 	if (fclone) {
216 		struct sk_buff *child = skb + 1;
217 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
218 
219 		kmemcheck_annotate_bitfield(child, flags1);
220 		kmemcheck_annotate_bitfield(child, flags2);
221 		skb->fclone = SKB_FCLONE_ORIG;
222 		atomic_set(fclone_ref, 1);
223 
224 		child->fclone = SKB_FCLONE_UNAVAILABLE;
225 	}
226 out:
227 	return skb;
228 nodata:
229 	kmem_cache_free(cache, skb);
230 	skb = NULL;
231 	goto out;
232 }
233 EXPORT_SYMBOL(__alloc_skb);
234 
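/*
 *	Illustrative sketch (not part of the original file): a typical caller
 *	allocates with alloc_skb(), reserves headroom for the protocol headers
 *	and then appends the payload with skb_put().  The sizes and the
 *	"payload" buffer below are hypothetical.
 *
 *		struct sk_buff *skb = alloc_skb(64 + 128, GFP_ATOMIC);
 *
 *		if (!skb)
 *			return -ENOMEM;
 *		skb_reserve(skb, 64);
 *		memcpy(skb_put(skb, 128), payload, 128);
 */
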
235 /**
236  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
237  *	@dev: network device to receive on
238  *	@length: length to allocate
239  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
240  *
241  *	Allocate a new &sk_buff and assign it a usage count of one. The
242  *	buffer has unspecified headroom built in. Users should allocate
243  *	the headroom they think they need without accounting for the
244  *	built in space. The built in space is used for optimisations.
245  *
246  *	%NULL is returned if there is no free memory.
247  */
248 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
249 		unsigned int length, gfp_t gfp_mask)
250 {
251 	struct sk_buff *skb;
252 
253 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
254 	if (likely(skb)) {
255 		skb_reserve(skb, NET_SKB_PAD);
256 		skb->dev = dev;
257 	}
258 	return skb;
259 }
260 EXPORT_SYMBOL(__netdev_alloc_skb);
261 
262 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
263 		int size)
264 {
265 	skb_fill_page_desc(skb, i, page, off, size);
266 	skb->len += size;
267 	skb->data_len += size;
268 	skb->truesize += size;
269 }
270 EXPORT_SYMBOL(skb_add_rx_frag);
271 
272 /**
273  *	dev_alloc_skb - allocate an skbuff for receiving
274  *	@length: length to allocate
275  *
276  *	Allocate a new &sk_buff and assign it a usage count of one. The
277  *	buffer has unspecified headroom built in. Users should allocate
278  *	the headroom they think they need without accounting for the
279  *	built in space. The built in space is used for optimisations.
280  *
281  *	%NULL is returned if there is no free memory. Although this function
282  *	allocates memory it can be called from an interrupt.
283  */
284 struct sk_buff *dev_alloc_skb(unsigned int length)
285 {
286 	/*
287 	 * There is more code here than it seems:
288 	 * __dev_alloc_skb is an inline
289 	 */
290 	return __dev_alloc_skb(length, GFP_ATOMIC);
291 }
292 EXPORT_SYMBOL(dev_alloc_skb);
293 
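/*
 *	Illustrative sketch (not part of the original file): a driver receive
 *	path commonly uses netdev_alloc_skb() (the %GFP_ATOMIC wrapper around
 *	__netdev_alloc_skb) and reserves its own alignment on top of the
 *	built-in NET_SKB_PAD.  RX_BUF_LEN and frame_len are hypothetical.
 *
 *		struct sk_buff *skb = netdev_alloc_skb(dev, RX_BUF_LEN + NET_IP_ALIGN);
 *
 *		if (!skb)
 *			return;
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		skb_put(skb, frame_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 */
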
294 static void skb_drop_list(struct sk_buff **listp)
295 {
296 	struct sk_buff *list = *listp;
297 
298 	*listp = NULL;
299 
300 	do {
301 		struct sk_buff *this = list;
302 		list = list->next;
303 		kfree_skb(this);
304 	} while (list);
305 }
306 
307 static inline void skb_drop_fraglist(struct sk_buff *skb)
308 {
309 	skb_drop_list(&skb_shinfo(skb)->frag_list);
310 }
311 
312 static void skb_clone_fraglist(struct sk_buff *skb)
313 {
314 	struct sk_buff *list;
315 
316 	skb_walk_frags(skb, list)
317 		skb_get(list);
318 }
319 
320 static void skb_release_data(struct sk_buff *skb)
321 {
322 	if (!skb->cloned ||
323 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
324 			       &skb_shinfo(skb)->dataref)) {
325 		if (skb_shinfo(skb)->nr_frags) {
326 			int i;
327 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
328 				put_page(skb_shinfo(skb)->frags[i].page);
329 		}
330 
331 		if (skb_has_frag_list(skb))
332 			skb_drop_fraglist(skb);
333 
334 		kfree(skb->head);
335 	}
336 }
337 
338 /*
339  *	Free an skbuff by memory without cleaning the state.
340  */
341 static void kfree_skbmem(struct sk_buff *skb)
342 {
343 	struct sk_buff *other;
344 	atomic_t *fclone_ref;
345 
346 	switch (skb->fclone) {
347 	case SKB_FCLONE_UNAVAILABLE:
348 		kmem_cache_free(skbuff_head_cache, skb);
349 		break;
350 
351 	case SKB_FCLONE_ORIG:
352 		fclone_ref = (atomic_t *) (skb + 2);
353 		if (atomic_dec_and_test(fclone_ref))
354 			kmem_cache_free(skbuff_fclone_cache, skb);
355 		break;
356 
357 	case SKB_FCLONE_CLONE:
358 		fclone_ref = (atomic_t *) (skb + 1);
359 		other = skb - 1;
360 
361 		/* The clone portion is available for
362 		 * fast-cloning again.
363 		 */
364 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
365 
366 		if (atomic_dec_and_test(fclone_ref))
367 			kmem_cache_free(skbuff_fclone_cache, other);
368 		break;
369 	}
370 }
371 
372 static void skb_release_head_state(struct sk_buff *skb)
373 {
374 	skb_dst_drop(skb);
375 #ifdef CONFIG_XFRM
376 	secpath_put(skb->sp);
377 #endif
378 	if (skb->destructor) {
379 		WARN_ON(in_irq());
380 		skb->destructor(skb);
381 	}
382 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
383 	nf_conntrack_put(skb->nfct);
384 #endif
385 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
386 	nf_conntrack_put_reasm(skb->nfct_reasm);
387 #endif
388 #ifdef CONFIG_BRIDGE_NETFILTER
389 	nf_bridge_put(skb->nf_bridge);
390 #endif
391 /* XXX: IS this still necessary? - JHS */
392 #ifdef CONFIG_NET_SCHED
393 	skb->tc_index = 0;
394 #ifdef CONFIG_NET_CLS_ACT
395 	skb->tc_verd = 0;
396 #endif
397 #endif
398 }
399 
400 /* Free everything but the sk_buff shell. */
401 static void skb_release_all(struct sk_buff *skb)
402 {
403 	skb_release_head_state(skb);
404 	skb_release_data(skb);
405 }
406 
407 /**
408  *	__kfree_skb - private function
409  *	@skb: buffer
410  *
411  *	Free an sk_buff. Release anything attached to the buffer.
412  *	Clean the state. This is an internal helper function. Users should
413  *	always call kfree_skb
414  */
415 
416 void __kfree_skb(struct sk_buff *skb)
417 {
418 	skb_release_all(skb);
419 	kfree_skbmem(skb);
420 }
421 EXPORT_SYMBOL(__kfree_skb);
422 
423 /**
424  *	kfree_skb - free an sk_buff
425  *	@skb: buffer to free
426  *
427  *	Drop a reference to the buffer and free it if the usage count has
428  *	hit zero.
429  */
430 void kfree_skb(struct sk_buff *skb)
431 {
432 	if (unlikely(!skb))
433 		return;
434 	if (likely(atomic_read(&skb->users) == 1))
435 		smp_rmb();
436 	else if (likely(!atomic_dec_and_test(&skb->users)))
437 		return;
438 	trace_kfree_skb(skb, __builtin_return_address(0));
439 	__kfree_skb(skb);
440 }
441 EXPORT_SYMBOL(kfree_skb);
442 
443 /**
444  *	consume_skb - free an skbuff
445  *	@skb: buffer to free
446  *
447  *	Drop a ref to the buffer and free it if the usage count has hit zero.
448  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
449  *	is being dropped after a failure and notes that in its tracepoint.
450  */
451 void consume_skb(struct sk_buff *skb)
452 {
453 	if (unlikely(!skb))
454 		return;
455 	if (likely(atomic_read(&skb->users) == 1))
456 		smp_rmb();
457 	else if (likely(!atomic_dec_and_test(&skb->users)))
458 		return;
459 	trace_consume_skb(skb);
460 	__kfree_skb(skb);
461 }
462 EXPORT_SYMBOL(consume_skb);
463 
464 /**
465  *	skb_recycle_check - check if skb can be reused for receive
466  *	@skb: buffer
467  *	@skb_size: minimum receive buffer size
468  *
469  *	Checks that the skb passed in is not shared or cloned, and
470  *	that it is linear and its head portion at least as large as
471  *	skb_size so that it can be recycled as a receive buffer.
472  *	If these conditions are met, this function does any necessary
473  *	reference count dropping and cleans up the skbuff as if it
474  *	just came from __alloc_skb().
475  */
476 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
477 {
478 	struct skb_shared_info *shinfo;
479 
480 	if (irqs_disabled())
481 		return false;
482 
483 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
484 		return false;
485 
486 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
487 	if (skb_end_pointer(skb) - skb->head < skb_size)
488 		return false;
489 
490 	if (skb_shared(skb) || skb_cloned(skb))
491 		return false;
492 
493 	skb_release_head_state(skb);
494 
495 	shinfo = skb_shinfo(skb);
496 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
497 	atomic_set(&shinfo->dataref, 1);
498 
499 	memset(skb, 0, offsetof(struct sk_buff, tail));
500 	skb->data = skb->head + NET_SKB_PAD;
501 	skb_reset_tail_pointer(skb);
502 
503 	return true;
504 }
505 EXPORT_SYMBOL(skb_recycle_check);
506 
507 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
508 {
509 	new->tstamp		= old->tstamp;
510 	new->dev		= old->dev;
511 	new->transport_header	= old->transport_header;
512 	new->network_header	= old->network_header;
513 	new->mac_header		= old->mac_header;
514 	skb_dst_copy(new, old);
515 	new->rxhash		= old->rxhash;
516 #ifdef CONFIG_XFRM
517 	new->sp			= secpath_get(old->sp);
518 #endif
519 	memcpy(new->cb, old->cb, sizeof(old->cb));
520 	new->csum		= old->csum;
521 	new->local_df		= old->local_df;
522 	new->pkt_type		= old->pkt_type;
523 	new->ip_summed		= old->ip_summed;
524 	skb_copy_queue_mapping(new, old);
525 	new->priority		= old->priority;
526 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
527 	new->ipvs_property	= old->ipvs_property;
528 #endif
529 	new->protocol		= old->protocol;
530 	new->mark		= old->mark;
531 	new->skb_iif		= old->skb_iif;
532 	__nf_copy(new, old);
533 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
534     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
535 	new->nf_trace		= old->nf_trace;
536 #endif
537 #ifdef CONFIG_NET_SCHED
538 	new->tc_index		= old->tc_index;
539 #ifdef CONFIG_NET_CLS_ACT
540 	new->tc_verd		= old->tc_verd;
541 #endif
542 #endif
543 	new->vlan_tci		= old->vlan_tci;
544 
545 	skb_copy_secmark(new, old);
546 }
547 
548 /*
549  * You should not add any new code to this function.  Add it to
550  * __copy_skb_header above instead.
551  */
552 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
553 {
554 #define C(x) n->x = skb->x
555 
556 	n->next = n->prev = NULL;
557 	n->sk = NULL;
558 	__copy_skb_header(n, skb);
559 
560 	C(len);
561 	C(data_len);
562 	C(mac_len);
563 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
564 	n->cloned = 1;
565 	n->nohdr = 0;
566 	n->destructor = NULL;
567 	C(tail);
568 	C(end);
569 	C(head);
570 	C(data);
571 	C(truesize);
572 	atomic_set(&n->users, 1);
573 
574 	atomic_inc(&(skb_shinfo(skb)->dataref));
575 	skb->cloned = 1;
576 
577 	return n;
578 #undef C
579 }
580 
581 /**
582  *	skb_morph	-	morph one skb into another
583  *	@dst: the skb to receive the contents
584  *	@src: the skb to supply the contents
585  *
586  *	This is identical to skb_clone except that the target skb is
587  *	supplied by the user.
588  *
589  *	The target skb is returned upon exit.
590  */
591 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
592 {
593 	skb_release_all(dst);
594 	return __skb_clone(dst, src);
595 }
596 EXPORT_SYMBOL_GPL(skb_morph);
597 
598 /**
599  *	skb_clone	-	duplicate an sk_buff
600  *	@skb: buffer to clone
601  *	@gfp_mask: allocation priority
602  *
603  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
604  *	copies share the same packet data but not structure. The new
605  *	buffer has a reference count of 1. If the allocation fails the
606  *	function returns %NULL otherwise the new buffer is returned.
607  *
608  *	If this function is called from an interrupt @gfp_mask must be
609  *	%GFP_ATOMIC.
610  */
611 
612 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
613 {
614 	struct sk_buff *n;
615 
616 	n = skb + 1;
617 	if (skb->fclone == SKB_FCLONE_ORIG &&
618 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
619 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
620 		n->fclone = SKB_FCLONE_CLONE;
621 		atomic_inc(fclone_ref);
622 	} else {
623 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
624 		if (!n)
625 			return NULL;
626 
627 		kmemcheck_annotate_bitfield(n, flags1);
628 		kmemcheck_annotate_bitfield(n, flags2);
629 		n->fclone = SKB_FCLONE_UNAVAILABLE;
630 	}
631 
632 	return __skb_clone(n, skb);
633 }
634 EXPORT_SYMBOL(skb_clone);
635 
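/*
 *	Illustrative sketch (not part of the original file): a clone is cheap
 *	because it shares the packet data, so it is only safe as long as no
 *	path writes to that data; each copy is then freed independently with
 *	kfree_skb().  The two consumers below are hypothetical.
 *
 *		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *		if (nskb)
 *			deliver_to_tap(nskb);
 *		deliver_to_stack(skb);
 */
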
636 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
637 {
638 #ifndef NET_SKBUFF_DATA_USES_OFFSET
639 	/*
640 	 *	Shift between the two data areas in bytes
641 	 */
642 	unsigned long offset = new->data - old->data;
643 #endif
644 
645 	__copy_skb_header(new, old);
646 
647 #ifndef NET_SKBUFF_DATA_USES_OFFSET
648 	/* {transport,network,mac}_header are relative to skb->head */
649 	new->transport_header += offset;
650 	new->network_header   += offset;
651 	if (skb_mac_header_was_set(new))
652 		new->mac_header	      += offset;
653 #endif
654 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
655 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
656 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
657 }
658 
659 /**
660  *	skb_copy	-	create private copy of an sk_buff
661  *	@skb: buffer to copy
662  *	@gfp_mask: allocation priority
663  *
664  *	Make a copy of both an &sk_buff and its data. This is used when the
665  *	caller wishes to modify the data and needs a private copy of the
666  *	data to alter. Returns %NULL on failure or the pointer to the buffer
667  *	on success. The returned buffer has a reference count of 1.
668  *
669  *	As by-product this function converts non-linear &sk_buff to linear
670  *	one, so that &sk_buff becomes completely private and caller is allowed
671  *	to modify all the data of returned buffer. This means that this
672  *	function is not recommended for use in circumstances when only
673  *	header is going to be modified. Use pskb_copy() instead.
674  */
675 
676 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
677 {
678 	int headerlen = skb_headroom(skb);
679 	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
680 	struct sk_buff *n = alloc_skb(size, gfp_mask);
681 
682 	if (!n)
683 		return NULL;
684 
685 	/* Set the data pointer */
686 	skb_reserve(n, headerlen);
687 	/* Set the tail pointer and length */
688 	skb_put(n, skb->len);
689 
690 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
691 		BUG();
692 
693 	copy_skb_header(n, skb);
694 	return n;
695 }
696 EXPORT_SYMBOL(skb_copy);
697 
698 /**
699  *	pskb_copy	-	create copy of an sk_buff with private head.
700  *	@skb: buffer to copy
701  *	@gfp_mask: allocation priority
702  *
703  *	Make a copy of both an &sk_buff and part of its data, located
704  *	in header. Fragmented data remain shared. This is used when
705  *	the caller wishes to modify only header of &sk_buff and needs
706  *	private copy of the header to alter. Returns %NULL on failure
707  *	or the pointer to the buffer on success.
708  *	The returned buffer has a reference count of 1.
709  */
710 
711 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
712 {
713 	unsigned int size = skb_end_pointer(skb) - skb->head;
714 	struct sk_buff *n = alloc_skb(size, gfp_mask);
715 
716 	if (!n)
717 		goto out;
718 
719 	/* Set the data pointer */
720 	skb_reserve(n, skb_headroom(skb));
721 	/* Set the tail pointer and length */
722 	skb_put(n, skb_headlen(skb));
723 	/* Copy the bytes */
724 	skb_copy_from_linear_data(skb, n->data, n->len);
725 
726 	n->truesize += skb->data_len;
727 	n->data_len  = skb->data_len;
728 	n->len	     = skb->len;
729 
730 	if (skb_shinfo(skb)->nr_frags) {
731 		int i;
732 
733 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
734 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
735 			get_page(skb_shinfo(n)->frags[i].page);
736 		}
737 		skb_shinfo(n)->nr_frags = i;
738 	}
739 
740 	if (skb_has_frag_list(skb)) {
741 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
742 		skb_clone_fraglist(n);
743 	}
744 
745 	copy_skb_header(n, skb);
746 out:
747 	return n;
748 }
749 EXPORT_SYMBOL(pskb_copy);
750 
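/*
 *	Illustrative sketch (not part of the original file): choosing between
 *	skb_copy() and pskb_copy().  If only the headers will be rewritten,
 *	pskb_copy() avoids copying the (possibly large) paged data.
 *	need_to_edit_payload is a hypothetical condition.
 *
 *		struct sk_buff *nskb;
 *
 *		if (need_to_edit_payload)
 *			nskb = skb_copy(skb, GFP_ATOMIC);
 *		else
 *			nskb = pskb_copy(skb, GFP_ATOMIC);
 *		if (!nskb)
 *			return -ENOMEM;
 */
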
751 /**
752  *	pskb_expand_head - reallocate header of &sk_buff
753  *	@skb: buffer to reallocate
754  *	@nhead: room to add at head
755  *	@ntail: room to add at tail
756  *	@gfp_mask: allocation priority
757  *
758  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
759  *	the header of skb. &sk_buff itself is not changed. &sk_buff MUST have
760  *	a reference count of 1. Returns zero on success or a negative error
761  *	code if expansion failed. In the latter case, &sk_buff is not changed.
762  *
763  *	All the pointers pointing into skb header may change and must be
764  *	reloaded after call to this function.
765  */
766 
767 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
768 		     gfp_t gfp_mask)
769 {
770 	int i;
771 	u8 *data;
772 	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
773 	long off;
774 	bool fastpath;
775 
776 	BUG_ON(nhead < 0);
777 
778 	if (skb_shared(skb))
779 		BUG();
780 
781 	size = SKB_DATA_ALIGN(size);
782 
783 	/* Check if we can avoid taking references on fragments if we own
784 	 * the last reference on skb->head. (see skb_release_data())
785 	 */
786 	if (!skb->cloned)
787 		fastpath = true;
788 	else {
789 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
790 
791 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
792 	}
793 
794 	if (fastpath &&
795 	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
796 		memmove(skb->head + size, skb_shinfo(skb),
797 			offsetof(struct skb_shared_info,
798 				 frags[skb_shinfo(skb)->nr_frags]));
799 		memmove(skb->head + nhead, skb->head,
800 			skb_tail_pointer(skb) - skb->head);
801 		off = nhead;
802 		goto adjust_others;
803 	}
804 
805 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
806 	if (!data)
807 		goto nodata;
808 
809 	/* Copy only real data... and, alas, header. This should be
810 	 * optimized for the cases when header is void.
811 	 */
812 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
813 
814 	memcpy((struct skb_shared_info *)(data + size),
815 	       skb_shinfo(skb),
816 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
817 
818 	if (fastpath) {
819 		kfree(skb->head);
820 	} else {
821 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
822 			get_page(skb_shinfo(skb)->frags[i].page);
823 
824 		if (skb_has_frag_list(skb))
825 			skb_clone_fraglist(skb);
826 
827 		skb_release_data(skb);
828 	}
829 	off = (data + nhead) - skb->head;
830 
831 	skb->head     = data;
832 adjust_others:
833 	skb->data    += off;
834 #ifdef NET_SKBUFF_DATA_USES_OFFSET
835 	skb->end      = size;
836 	off           = nhead;
837 #else
838 	skb->end      = skb->head + size;
839 #endif
840 	/* {transport,network,mac}_header and tail are relative to skb->head */
841 	skb->tail	      += off;
842 	skb->transport_header += off;
843 	skb->network_header   += off;
844 	if (skb_mac_header_was_set(skb))
845 		skb->mac_header += off;
846 	/* Only adjust this if it actually is csum_start rather than csum */
847 	if (skb->ip_summed == CHECKSUM_PARTIAL)
848 		skb->csum_start += nhead;
849 	skb->cloned   = 0;
850 	skb->hdr_len  = 0;
851 	skb->nohdr    = 0;
852 	atomic_set(&skb_shinfo(skb)->dataref, 1);
853 	return 0;
854 
855 nodata:
856 	return -ENOMEM;
857 }
858 EXPORT_SYMBOL(pskb_expand_head);
859 
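/*
 *	Illustrative sketch (not part of the original file): growing the
 *	headroom before pushing a new header.  All pointers into the header
 *	must be re-read after pskb_expand_head(); here only skb->data is
 *	used.  NEW_HDR_LEN and new_hdr are hypothetical.
 *
 *		if (skb_headroom(skb) < NEW_HDR_LEN &&
 *		    pskb_expand_head(skb, NEW_HDR_LEN - skb_headroom(skb),
 *				     0, GFP_ATOMIC))
 *			goto drop;
 *		memcpy(skb_push(skb, NEW_HDR_LEN), new_hdr, NEW_HDR_LEN);
 */
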
860 /* Make private copy of skb with writable head and some headroom */
861 
862 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
863 {
864 	struct sk_buff *skb2;
865 	int delta = headroom - skb_headroom(skb);
866 
867 	if (delta <= 0)
868 		skb2 = pskb_copy(skb, GFP_ATOMIC);
869 	else {
870 		skb2 = skb_clone(skb, GFP_ATOMIC);
871 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
872 					     GFP_ATOMIC)) {
873 			kfree_skb(skb2);
874 			skb2 = NULL;
875 		}
876 	}
877 	return skb2;
878 }
879 EXPORT_SYMBOL(skb_realloc_headroom);
880 
881 /**
882  *	skb_copy_expand	-	copy and expand sk_buff
883  *	@skb: buffer to copy
884  *	@newheadroom: new free bytes at head
885  *	@newtailroom: new free bytes at tail
886  *	@gfp_mask: allocation priority
887  *
888  *	Make a copy of both an &sk_buff and its data and while doing so
889  *	allocate additional space.
890  *
891  *	This is used when the caller wishes to modify the data and needs a
892  *	private copy of the data to alter as well as more space for new fields.
893  *	Returns %NULL on failure or the pointer to the buffer
894  *	on success. The returned buffer has a reference count of 1.
895  *
896  *	You must pass %GFP_ATOMIC as the allocation priority if this function
897  *	is called from an interrupt.
898  */
899 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
900 				int newheadroom, int newtailroom,
901 				gfp_t gfp_mask)
902 {
903 	/*
904 	 *	Allocate the copy buffer
905 	 */
906 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
907 				      gfp_mask);
908 	int oldheadroom = skb_headroom(skb);
909 	int head_copy_len, head_copy_off;
910 	int off;
911 
912 	if (!n)
913 		return NULL;
914 
915 	skb_reserve(n, newheadroom);
916 
917 	/* Set the tail pointer and length */
918 	skb_put(n, skb->len);
919 
920 	head_copy_len = oldheadroom;
921 	head_copy_off = 0;
922 	if (newheadroom <= head_copy_len)
923 		head_copy_len = newheadroom;
924 	else
925 		head_copy_off = newheadroom - head_copy_len;
926 
927 	/* Copy the linear header and data. */
928 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
929 			  skb->len + head_copy_len))
930 		BUG();
931 
932 	copy_skb_header(n, skb);
933 
934 	off                  = newheadroom - oldheadroom;
935 	if (n->ip_summed == CHECKSUM_PARTIAL)
936 		n->csum_start += off;
937 #ifdef NET_SKBUFF_DATA_USES_OFFSET
938 	n->transport_header += off;
939 	n->network_header   += off;
940 	if (skb_mac_header_was_set(skb))
941 		n->mac_header += off;
942 #endif
943 
944 	return n;
945 }
946 EXPORT_SYMBOL(skb_copy_expand);
947 
948 /**
949  *	skb_pad			-	zero pad the tail of an skb
950  *	@skb: buffer to pad
951  *	@pad: space to pad
952  *
953  *	Ensure that a buffer is followed by a padding area that is zero
954  *	filled. Used by network drivers which may DMA or transfer data
955  *	beyond the buffer end onto the wire.
956  *
957  *	May return error in out of memory cases. The skb is freed on error.
958  */
959 
960 int skb_pad(struct sk_buff *skb, int pad)
961 {
962 	int err;
963 	int ntail;
964 
965 	/* If the skbuff is non-linear, tailroom is always zero. */
966 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
967 		memset(skb->data+skb->len, 0, pad);
968 		return 0;
969 	}
970 
971 	ntail = skb->data_len + pad - (skb->end - skb->tail);
972 	if (likely(skb_cloned(skb) || ntail > 0)) {
973 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
974 		if (unlikely(err))
975 			goto free_skb;
976 	}
977 
978 	/* FIXME: The use of this function with non-linear skb's really needs
979 	 * to be audited.
980 	 */
981 	err = skb_linearize(skb);
982 	if (unlikely(err))
983 		goto free_skb;
984 
985 	memset(skb->data + skb->len, 0, pad);
986 	return 0;
987 
988 free_skb:
989 	kfree_skb(skb);
990 	return err;
991 }
992 EXPORT_SYMBOL(skb_pad);
993 
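/*
 *	Illustrative sketch (not part of the original file): padding a short
 *	Ethernet frame before transmission.  skb_pad() zeroes the tailroom but
 *	does not change skb->len, and it frees the skb on error, so the caller
 *	must not touch the buffer again in that case.  pkt_len is hypothetical.
 *
 *		if (skb->len < ETH_ZLEN) {
 *			if (skb_pad(skb, ETH_ZLEN - skb->len))
 *				return NETDEV_TX_OK;
 *			pkt_len = ETH_ZLEN;
 *		}
 */
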
994 /**
995  *	skb_put - add data to a buffer
996  *	@skb: buffer to use
997  *	@len: amount of data to add
998  *
999  *	This function extends the used data area of the buffer. If this would
1000  *	exceed the total buffer size the kernel will panic. A pointer to the
1001  *	first byte of the extra data is returned.
1002  */
1003 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1004 {
1005 	unsigned char *tmp = skb_tail_pointer(skb);
1006 	SKB_LINEAR_ASSERT(skb);
1007 	skb->tail += len;
1008 	skb->len  += len;
1009 	if (unlikely(skb->tail > skb->end))
1010 		skb_over_panic(skb, len, __builtin_return_address(0));
1011 	return tmp;
1012 }
1013 EXPORT_SYMBOL(skb_put);
1014 
1015 /**
1016  *	skb_push - add data to the start of a buffer
1017  *	@skb: buffer to use
1018  *	@len: amount of data to add
1019  *
1020  *	This function extends the used data area of the buffer at the buffer
1021  *	start. If this would exceed the total buffer headroom the kernel will
1022  *	panic. A pointer to the first byte of the extra data is returned.
1023  */
1024 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1025 {
1026 	skb->data -= len;
1027 	skb->len  += len;
1028 	if (unlikely(skb->data<skb->head))
1029 		skb_under_panic(skb, len, __builtin_return_address(0));
1030 	return skb->data;
1031 }
1032 EXPORT_SYMBOL(skb_push);
1033 
1034 /**
1035  *	skb_pull - remove data from the start of a buffer
1036  *	@skb: buffer to use
1037  *	@len: amount of data to remove
1038  *
1039  *	This function removes data from the start of a buffer, returning
1040  *	the memory to the headroom. A pointer to the next data in the buffer
1041  *	is returned. Once the data has been pulled future pushes will overwrite
1042  *	the old data.
1043  */
1044 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1045 {
1046 	return skb_pull_inline(skb, len);
1047 }
1048 EXPORT_SYMBOL(skb_pull);
1049 
1050 /**
1051  *	skb_trim - remove end from a buffer
1052  *	@skb: buffer to alter
1053  *	@len: new length
1054  *
1055  *	Cut the length of a buffer down by removing data from the tail. If
1056  *	the buffer is already under the length specified it is not modified.
1057  *	The skb must be linear.
1058  */
1059 void skb_trim(struct sk_buff *skb, unsigned int len)
1060 {
1061 	if (skb->len > len)
1062 		__skb_trim(skb, len);
1063 }
1064 EXPORT_SYMBOL(skb_trim);
1065 
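/*
 *	Illustrative sketch (not part of the original file): the four basic
 *	pointer-moving helpers used together.  skb_put() and skb_push() grow
 *	the data area at the tail and head, skb_pull() strips a header that
 *	has been processed, and skb_trim() drops trailing bytes such as
 *	padding.  data, hdr and the *_len variables are hypothetical.
 *
 *		memcpy(skb_put(skb, data_len), data, data_len);
 *		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 *
 *		skb_pull(skb, hdr_len);
 *		skb_trim(skb, payload_len);
 */
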
1066 /* Trims skb to length len. It can change skb pointers.
1067  */
1068 
1069 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1070 {
1071 	struct sk_buff **fragp;
1072 	struct sk_buff *frag;
1073 	int offset = skb_headlen(skb);
1074 	int nfrags = skb_shinfo(skb)->nr_frags;
1075 	int i;
1076 	int err;
1077 
1078 	if (skb_cloned(skb) &&
1079 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1080 		return err;
1081 
1082 	i = 0;
1083 	if (offset >= len)
1084 		goto drop_pages;
1085 
1086 	for (; i < nfrags; i++) {
1087 		int end = offset + skb_shinfo(skb)->frags[i].size;
1088 
1089 		if (end < len) {
1090 			offset = end;
1091 			continue;
1092 		}
1093 
1094 		skb_shinfo(skb)->frags[i++].size = len - offset;
1095 
1096 drop_pages:
1097 		skb_shinfo(skb)->nr_frags = i;
1098 
1099 		for (; i < nfrags; i++)
1100 			put_page(skb_shinfo(skb)->frags[i].page);
1101 
1102 		if (skb_has_frag_list(skb))
1103 			skb_drop_fraglist(skb);
1104 		goto done;
1105 	}
1106 
1107 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1108 	     fragp = &frag->next) {
1109 		int end = offset + frag->len;
1110 
1111 		if (skb_shared(frag)) {
1112 			struct sk_buff *nfrag;
1113 
1114 			nfrag = skb_clone(frag, GFP_ATOMIC);
1115 			if (unlikely(!nfrag))
1116 				return -ENOMEM;
1117 
1118 			nfrag->next = frag->next;
1119 			kfree_skb(frag);
1120 			frag = nfrag;
1121 			*fragp = frag;
1122 		}
1123 
1124 		if (end < len) {
1125 			offset = end;
1126 			continue;
1127 		}
1128 
1129 		if (end > len &&
1130 		    unlikely((err = pskb_trim(frag, len - offset))))
1131 			return err;
1132 
1133 		if (frag->next)
1134 			skb_drop_list(&frag->next);
1135 		break;
1136 	}
1137 
1138 done:
1139 	if (len > skb_headlen(skb)) {
1140 		skb->data_len -= skb->len - len;
1141 		skb->len       = len;
1142 	} else {
1143 		skb->len       = len;
1144 		skb->data_len  = 0;
1145 		skb_set_tail_pointer(skb, len);
1146 	}
1147 
1148 	return 0;
1149 }
1150 EXPORT_SYMBOL(___pskb_trim);
1151 
1152 /**
1153  *	__pskb_pull_tail - advance tail of skb header
1154  *	@skb: buffer to reallocate
1155  *	@delta: number of bytes to advance tail
1156  *
1157  *	The function only makes sense on a fragmented &sk_buff:
1158  *	it expands the header, moving its tail forward and copying the
1159  *	necessary data from the fragmented part.
1160  *
1161  *	&sk_buff MUST have reference count of 1.
1162  *
1163  *	Returns %NULL (and &sk_buff does not change) if pull failed
1164  *	or value of new tail of skb in the case of success.
1165  *
1166  *	All the pointers pointing into skb header may change and must be
1167  *	reloaded after call to this function.
1168  */
1169 
1170 /* Moves tail of skb head forward, copying data from fragmented part,
1171  * when it is necessary.
1172  * 1. It may fail due to malloc failure.
1173  * 2. It may change skb pointers.
1174  *
1175  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1176  */
1177 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1178 {
1179 	/* If the skb does not have enough free space at the tail, get a new
1180 	 * one plus 128 bytes for future expansions. If we have enough room
1181 	 * at the tail, reallocate without expansion only if the skb is cloned.
1182 	 */
1183 	int i, k, eat = (skb->tail + delta) - skb->end;
1184 
1185 	if (eat > 0 || skb_cloned(skb)) {
1186 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1187 				     GFP_ATOMIC))
1188 			return NULL;
1189 	}
1190 
1191 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1192 		BUG();
1193 
1194 	/* Optimization: no fragments, no reasons to preestimate
1195 	 * size of pulled pages. Superb.
1196 	 */
1197 	if (!skb_has_frag_list(skb))
1198 		goto pull_pages;
1199 
1200 	/* Estimate size of pulled pages. */
1201 	eat = delta;
1202 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1203 		if (skb_shinfo(skb)->frags[i].size >= eat)
1204 			goto pull_pages;
1205 		eat -= skb_shinfo(skb)->frags[i].size;
1206 	}
1207 
1208 	/* If we need to update the frag list, we are in trouble.
1209 	 * Certainly, it is possible to add an offset to the skb data,
1210 	 * but taking into account that pulling is expected to
1211 	 * be a very rare operation, it is worth fighting against
1212 	 * further bloating of the skb head and crucifying ourselves here instead.
1213 	 * Pure masochism, indeed. 8)8)
1214 	 */
1215 	if (eat) {
1216 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1217 		struct sk_buff *clone = NULL;
1218 		struct sk_buff *insp = NULL;
1219 
1220 		do {
1221 			BUG_ON(!list);
1222 
1223 			if (list->len <= eat) {
1224 				/* Eaten as whole. */
1225 				eat -= list->len;
1226 				list = list->next;
1227 				insp = list;
1228 			} else {
1229 				/* Eaten partially. */
1230 
1231 				if (skb_shared(list)) {
1232 					/* Sucks! We need to fork list. :-( */
1233 					clone = skb_clone(list, GFP_ATOMIC);
1234 					if (!clone)
1235 						return NULL;
1236 					insp = list->next;
1237 					list = clone;
1238 				} else {
1239 					/* This may be pulled without
1240 					 * problems. */
1241 					insp = list;
1242 				}
1243 				if (!pskb_pull(list, eat)) {
1244 					kfree_skb(clone);
1245 					return NULL;
1246 				}
1247 				break;
1248 			}
1249 		} while (eat);
1250 
1251 		/* Free pulled out fragments. */
1252 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1253 			skb_shinfo(skb)->frag_list = list->next;
1254 			kfree_skb(list);
1255 		}
1256 		/* And insert new clone at head. */
1257 		if (clone) {
1258 			clone->next = list;
1259 			skb_shinfo(skb)->frag_list = clone;
1260 		}
1261 	}
1262 	/* Success! Now we may commit changes to skb data. */
1263 
1264 pull_pages:
1265 	eat = delta;
1266 	k = 0;
1267 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1268 		if (skb_shinfo(skb)->frags[i].size <= eat) {
1269 			put_page(skb_shinfo(skb)->frags[i].page);
1270 			eat -= skb_shinfo(skb)->frags[i].size;
1271 		} else {
1272 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1273 			if (eat) {
1274 				skb_shinfo(skb)->frags[k].page_offset += eat;
1275 				skb_shinfo(skb)->frags[k].size -= eat;
1276 				eat = 0;
1277 			}
1278 			k++;
1279 		}
1280 	}
1281 	skb_shinfo(skb)->nr_frags = k;
1282 
1283 	skb->tail     += delta;
1284 	skb->data_len -= delta;
1285 
1286 	return skb_tail_pointer(skb);
1287 }
1288 EXPORT_SYMBOL(__pskb_pull_tail);
1289 
1290 /* Copy some data bits from skb to kernel buffer. */
1291 
1292 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1293 {
1294 	int start = skb_headlen(skb);
1295 	struct sk_buff *frag_iter;
1296 	int i, copy;
1297 
1298 	if (offset > (int)skb->len - len)
1299 		goto fault;
1300 
1301 	/* Copy header. */
1302 	if ((copy = start - offset) > 0) {
1303 		if (copy > len)
1304 			copy = len;
1305 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1306 		if ((len -= copy) == 0)
1307 			return 0;
1308 		offset += copy;
1309 		to     += copy;
1310 	}
1311 
1312 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1313 		int end;
1314 
1315 		WARN_ON(start > offset + len);
1316 
1317 		end = start + skb_shinfo(skb)->frags[i].size;
1318 		if ((copy = end - offset) > 0) {
1319 			u8 *vaddr;
1320 
1321 			if (copy > len)
1322 				copy = len;
1323 
1324 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1325 			memcpy(to,
1326 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1327 			       offset - start, copy);
1328 			kunmap_skb_frag(vaddr);
1329 
1330 			if ((len -= copy) == 0)
1331 				return 0;
1332 			offset += copy;
1333 			to     += copy;
1334 		}
1335 		start = end;
1336 	}
1337 
1338 	skb_walk_frags(skb, frag_iter) {
1339 		int end;
1340 
1341 		WARN_ON(start > offset + len);
1342 
1343 		end = start + frag_iter->len;
1344 		if ((copy = end - offset) > 0) {
1345 			if (copy > len)
1346 				copy = len;
1347 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1348 				goto fault;
1349 			if ((len -= copy) == 0)
1350 				return 0;
1351 			offset += copy;
1352 			to     += copy;
1353 		}
1354 		start = end;
1355 	}
1356 	if (!len)
1357 		return 0;
1358 
1359 fault:
1360 	return -EFAULT;
1361 }
1362 EXPORT_SYMBOL(skb_copy_bits);
1363 
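/*
 *	Illustrative sketch (not part of the original file): peeking at the
 *	first bytes of a possibly non-linear skb without linearizing it.
 *
 *		u8 hdr[64];
 *		unsigned int n = min_t(unsigned int, skb->len, sizeof(hdr));
 *
 *		if (skb_copy_bits(skb, 0, hdr, n))
 *			return -EFAULT;
 */
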
1364 /*
1365  * Callback from splice_to_pipe(), if we need to release some pages
1366  * at the end of the spd in case we error'ed out in filling the pipe.
1367  */
1368 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1369 {
1370 	put_page(spd->pages[i]);
1371 }
1372 
1373 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1374 					  unsigned int *offset,
1375 					  struct sk_buff *skb, struct sock *sk)
1376 {
1377 	struct page *p = sk->sk_sndmsg_page;
1378 	unsigned int off;
1379 
1380 	if (!p) {
1381 new_page:
1382 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1383 		if (!p)
1384 			return NULL;
1385 
1386 		off = sk->sk_sndmsg_off = 0;
1387 		/* hold one ref to this page until it's full */
1388 	} else {
1389 		unsigned int mlen;
1390 
1391 		off = sk->sk_sndmsg_off;
1392 		mlen = PAGE_SIZE - off;
1393 		if (mlen < 64 && mlen < *len) {
1394 			put_page(p);
1395 			goto new_page;
1396 		}
1397 
1398 		*len = min_t(unsigned int, *len, mlen);
1399 	}
1400 
1401 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1402 	sk->sk_sndmsg_off += *len;
1403 	*offset = off;
1404 	get_page(p);
1405 
1406 	return p;
1407 }
1408 
1409 /*
1410  * Fill page/offset/length into spd, if it can hold more pages.
1411  */
1412 static inline int spd_fill_page(struct splice_pipe_desc *spd,
1413 				struct pipe_inode_info *pipe, struct page *page,
1414 				unsigned int *len, unsigned int offset,
1415 				struct sk_buff *skb, int linear,
1416 				struct sock *sk)
1417 {
1418 	if (unlikely(spd->nr_pages == pipe->buffers))
1419 		return 1;
1420 
1421 	if (linear) {
1422 		page = linear_to_page(page, len, &offset, skb, sk);
1423 		if (!page)
1424 			return 1;
1425 	} else
1426 		get_page(page);
1427 
1428 	spd->pages[spd->nr_pages] = page;
1429 	spd->partial[spd->nr_pages].len = *len;
1430 	spd->partial[spd->nr_pages].offset = offset;
1431 	spd->nr_pages++;
1432 
1433 	return 0;
1434 }
1435 
1436 static inline void __segment_seek(struct page **page, unsigned int *poff,
1437 				  unsigned int *plen, unsigned int off)
1438 {
1439 	unsigned long n;
1440 
1441 	*poff += off;
1442 	n = *poff / PAGE_SIZE;
1443 	if (n)
1444 		*page = nth_page(*page, n);
1445 
1446 	*poff = *poff % PAGE_SIZE;
1447 	*plen -= off;
1448 }
1449 
1450 static inline int __splice_segment(struct page *page, unsigned int poff,
1451 				   unsigned int plen, unsigned int *off,
1452 				   unsigned int *len, struct sk_buff *skb,
1453 				   struct splice_pipe_desc *spd, int linear,
1454 				   struct sock *sk,
1455 				   struct pipe_inode_info *pipe)
1456 {
1457 	if (!*len)
1458 		return 1;
1459 
1460 	/* skip this segment if already processed */
1461 	if (*off >= plen) {
1462 		*off -= plen;
1463 		return 0;
1464 	}
1465 
1466 	/* ignore any bits we already processed */
1467 	if (*off) {
1468 		__segment_seek(&page, &poff, &plen, *off);
1469 		*off = 0;
1470 	}
1471 
1472 	do {
1473 		unsigned int flen = min(*len, plen);
1474 
1475 		/* the linear region may spread across several pages  */
1476 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1477 
1478 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1479 			return 1;
1480 
1481 		__segment_seek(&page, &poff, &plen, flen);
1482 		*len -= flen;
1483 
1484 	} while (*len && plen);
1485 
1486 	return 0;
1487 }
1488 
1489 /*
1490  * Map linear and fragment data from the skb to spd. It reports failure if the
1491  * pipe is full or if we already spliced the requested length.
1492  */
1493 static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1494 			     unsigned int *offset, unsigned int *len,
1495 			     struct splice_pipe_desc *spd, struct sock *sk)
1496 {
1497 	int seg;
1498 
1499 	/*
1500 	 * map the linear part
1501 	 */
1502 	if (__splice_segment(virt_to_page(skb->data),
1503 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1504 			     skb_headlen(skb),
1505 			     offset, len, skb, spd, 1, sk, pipe))
1506 		return 1;
1507 
1508 	/*
1509 	 * then map the fragments
1510 	 */
1511 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1512 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1513 
1514 		if (__splice_segment(f->page, f->page_offset, f->size,
1515 				     offset, len, skb, spd, 0, sk, pipe))
1516 			return 1;
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 /*
1523  * Map data from the skb to a pipe. Should handle both the linear part,
1524  * the fragments, and the frag list. It does NOT handle frag lists within
1525  * the frag list, if such a thing exists. We'd probably need to recurse to
1526  * handle that cleanly.
1527  */
1528 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1529 		    struct pipe_inode_info *pipe, unsigned int tlen,
1530 		    unsigned int flags)
1531 {
1532 	struct partial_page partial[PIPE_DEF_BUFFERS];
1533 	struct page *pages[PIPE_DEF_BUFFERS];
1534 	struct splice_pipe_desc spd = {
1535 		.pages = pages,
1536 		.partial = partial,
1537 		.flags = flags,
1538 		.ops = &sock_pipe_buf_ops,
1539 		.spd_release = sock_spd_release,
1540 	};
1541 	struct sk_buff *frag_iter;
1542 	struct sock *sk = skb->sk;
1543 	int ret = 0;
1544 
1545 	if (splice_grow_spd(pipe, &spd))
1546 		return -ENOMEM;
1547 
1548 	/*
1549 	 * __skb_splice_bits() only fails if the output has no room left,
1550 	 * so no point in going over the frag_list for the error case.
1551 	 */
1552 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1553 		goto done;
1554 	else if (!tlen)
1555 		goto done;
1556 
1557 	/*
1558 	 * now see if we have a frag_list to map
1559 	 */
1560 	skb_walk_frags(skb, frag_iter) {
1561 		if (!tlen)
1562 			break;
1563 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1564 			break;
1565 	}
1566 
1567 done:
1568 	if (spd.nr_pages) {
1569 		/*
1570 		 * Drop the socket lock, otherwise we have reverse
1571 		 * locking dependencies between sk_lock and i_mutex
1572 		 * here as compared to sendfile(). We enter here
1573 		 * with the socket lock held, and splice_to_pipe() will
1574 		 * grab the pipe inode lock. For sendfile() emulation,
1575 		 * we call into ->sendpage() with the i_mutex lock held
1576 		 * and networking will grab the socket lock.
1577 		 */
1578 		release_sock(sk);
1579 		ret = splice_to_pipe(pipe, &spd);
1580 		lock_sock(sk);
1581 	}
1582 
1583 	splice_shrink_spd(pipe, &spd);
1584 	return ret;
1585 }
1586 
1587 /**
1588  *	skb_store_bits - store bits from kernel buffer to skb
1589  *	@skb: destination buffer
1590  *	@offset: offset in destination
1591  *	@from: source buffer
1592  *	@len: number of bytes to copy
1593  *
1594  *	Copy the specified number of bytes from the source buffer to the
1595  *	destination skb.  This function handles all the messy bits of
1596  *	traversing fragment lists and such.
1597  */
1598 
1599 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1600 {
1601 	int start = skb_headlen(skb);
1602 	struct sk_buff *frag_iter;
1603 	int i, copy;
1604 
1605 	if (offset > (int)skb->len - len)
1606 		goto fault;
1607 
1608 	if ((copy = start - offset) > 0) {
1609 		if (copy > len)
1610 			copy = len;
1611 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1612 		if ((len -= copy) == 0)
1613 			return 0;
1614 		offset += copy;
1615 		from += copy;
1616 	}
1617 
1618 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1619 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1620 		int end;
1621 
1622 		WARN_ON(start > offset + len);
1623 
1624 		end = start + frag->size;
1625 		if ((copy = end - offset) > 0) {
1626 			u8 *vaddr;
1627 
1628 			if (copy > len)
1629 				copy = len;
1630 
1631 			vaddr = kmap_skb_frag(frag);
1632 			memcpy(vaddr + frag->page_offset + offset - start,
1633 			       from, copy);
1634 			kunmap_skb_frag(vaddr);
1635 
1636 			if ((len -= copy) == 0)
1637 				return 0;
1638 			offset += copy;
1639 			from += copy;
1640 		}
1641 		start = end;
1642 	}
1643 
1644 	skb_walk_frags(skb, frag_iter) {
1645 		int end;
1646 
1647 		WARN_ON(start > offset + len);
1648 
1649 		end = start + frag_iter->len;
1650 		if ((copy = end - offset) > 0) {
1651 			if (copy > len)
1652 				copy = len;
1653 			if (skb_store_bits(frag_iter, offset - start,
1654 					   from, copy))
1655 				goto fault;
1656 			if ((len -= copy) == 0)
1657 				return 0;
1658 			offset += copy;
1659 			from += copy;
1660 		}
1661 		start = end;
1662 	}
1663 	if (!len)
1664 		return 0;
1665 
1666 fault:
1667 	return -EFAULT;
1668 }
1669 EXPORT_SYMBOL(skb_store_bits);
1670 
1671 /* Checksum skb data. */
1672 
1673 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1674 			  int len, __wsum csum)
1675 {
1676 	int start = skb_headlen(skb);
1677 	int i, copy = start - offset;
1678 	struct sk_buff *frag_iter;
1679 	int pos = 0;
1680 
1681 	/* Checksum header. */
1682 	if (copy > 0) {
1683 		if (copy > len)
1684 			copy = len;
1685 		csum = csum_partial(skb->data + offset, copy, csum);
1686 		if ((len -= copy) == 0)
1687 			return csum;
1688 		offset += copy;
1689 		pos	= copy;
1690 	}
1691 
1692 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1693 		int end;
1694 
1695 		WARN_ON(start > offset + len);
1696 
1697 		end = start + skb_shinfo(skb)->frags[i].size;
1698 		if ((copy = end - offset) > 0) {
1699 			__wsum csum2;
1700 			u8 *vaddr;
1701 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1702 
1703 			if (copy > len)
1704 				copy = len;
1705 			vaddr = kmap_skb_frag(frag);
1706 			csum2 = csum_partial(vaddr + frag->page_offset +
1707 					     offset - start, copy, 0);
1708 			kunmap_skb_frag(vaddr);
1709 			csum = csum_block_add(csum, csum2, pos);
1710 			if (!(len -= copy))
1711 				return csum;
1712 			offset += copy;
1713 			pos    += copy;
1714 		}
1715 		start = end;
1716 	}
1717 
1718 	skb_walk_frags(skb, frag_iter) {
1719 		int end;
1720 
1721 		WARN_ON(start > offset + len);
1722 
1723 		end = start + frag_iter->len;
1724 		if ((copy = end - offset) > 0) {
1725 			__wsum csum2;
1726 			if (copy > len)
1727 				copy = len;
1728 			csum2 = skb_checksum(frag_iter, offset - start,
1729 					     copy, 0);
1730 			csum = csum_block_add(csum, csum2, pos);
1731 			if ((len -= copy) == 0)
1732 				return csum;
1733 			offset += copy;
1734 			pos    += copy;
1735 		}
1736 		start = end;
1737 	}
1738 	BUG_ON(len);
1739 
1740 	return csum;
1741 }
1742 EXPORT_SYMBOL(skb_checksum);
1743 
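/*
 *	Illustrative sketch (not part of the original file): computing the
 *	folded Internet checksum over the part of the packet that follows a
 *	header of hypothetical length hdr_len.
 *
 *		__wsum csum = skb_checksum(skb, hdr_len, skb->len - hdr_len, 0);
 *		__sum16 folded = csum_fold(csum);
 */
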
1744 /* Both of above in one bottle. */
1745 
1746 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1747 				    u8 *to, int len, __wsum csum)
1748 {
1749 	int start = skb_headlen(skb);
1750 	int i, copy = start - offset;
1751 	struct sk_buff *frag_iter;
1752 	int pos = 0;
1753 
1754 	/* Copy header. */
1755 	if (copy > 0) {
1756 		if (copy > len)
1757 			copy = len;
1758 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1759 						 copy, csum);
1760 		if ((len -= copy) == 0)
1761 			return csum;
1762 		offset += copy;
1763 		to     += copy;
1764 		pos	= copy;
1765 	}
1766 
1767 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1768 		int end;
1769 
1770 		WARN_ON(start > offset + len);
1771 
1772 		end = start + skb_shinfo(skb)->frags[i].size;
1773 		if ((copy = end - offset) > 0) {
1774 			__wsum csum2;
1775 			u8 *vaddr;
1776 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1777 
1778 			if (copy > len)
1779 				copy = len;
1780 			vaddr = kmap_skb_frag(frag);
1781 			csum2 = csum_partial_copy_nocheck(vaddr +
1782 							  frag->page_offset +
1783 							  offset - start, to,
1784 							  copy, 0);
1785 			kunmap_skb_frag(vaddr);
1786 			csum = csum_block_add(csum, csum2, pos);
1787 			if (!(len -= copy))
1788 				return csum;
1789 			offset += copy;
1790 			to     += copy;
1791 			pos    += copy;
1792 		}
1793 		start = end;
1794 	}
1795 
1796 	skb_walk_frags(skb, frag_iter) {
1797 		__wsum csum2;
1798 		int end;
1799 
1800 		WARN_ON(start > offset + len);
1801 
1802 		end = start + frag_iter->len;
1803 		if ((copy = end - offset) > 0) {
1804 			if (copy > len)
1805 				copy = len;
1806 			csum2 = skb_copy_and_csum_bits(frag_iter,
1807 						       offset - start,
1808 						       to, copy, 0);
1809 			csum = csum_block_add(csum, csum2, pos);
1810 			if ((len -= copy) == 0)
1811 				return csum;
1812 			offset += copy;
1813 			to     += copy;
1814 			pos    += copy;
1815 		}
1816 		start = end;
1817 	}
1818 	BUG_ON(len);
1819 	return csum;
1820 }
1821 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1822 
1823 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1824 {
1825 	__wsum csum;
1826 	long csstart;
1827 
1828 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1829 		csstart = skb_checksum_start_offset(skb);
1830 	else
1831 		csstart = skb_headlen(skb);
1832 
1833 	BUG_ON(csstart > skb_headlen(skb));
1834 
1835 	skb_copy_from_linear_data(skb, to, csstart);
1836 
1837 	csum = 0;
1838 	if (csstart != skb->len)
1839 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1840 					      skb->len - csstart, 0);
1841 
1842 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1843 		long csstuff = csstart + skb->csum_offset;
1844 
1845 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1846 	}
1847 }
1848 EXPORT_SYMBOL(skb_copy_and_csum_dev);
1849 
1850 /**
1851  *	skb_dequeue - remove from the head of the queue
1852  *	@list: list to dequeue from
1853  *
1854  *	Remove the head of the list. The list lock is taken so the function
1855  *	may be used safely with other locking list functions. The head item is
1856  *	returned or %NULL if the list is empty.
1857  */
1858 
1859 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1860 {
1861 	unsigned long flags;
1862 	struct sk_buff *result;
1863 
1864 	spin_lock_irqsave(&list->lock, flags);
1865 	result = __skb_dequeue(list);
1866 	spin_unlock_irqrestore(&list->lock, flags);
1867 	return result;
1868 }
1869 EXPORT_SYMBOL(skb_dequeue);
1870 
1871 /**
1872  *	skb_dequeue_tail - remove from the tail of the queue
1873  *	@list: list to dequeue from
1874  *
1875  *	Remove the tail of the list. The list lock is taken so the function
1876  *	may be used safely with other locking list functions. The tail item is
1877  *	returned or %NULL if the list is empty.
1878  */
1879 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1880 {
1881 	unsigned long flags;
1882 	struct sk_buff *result;
1883 
1884 	spin_lock_irqsave(&list->lock, flags);
1885 	result = __skb_dequeue_tail(list);
1886 	spin_unlock_irqrestore(&list->lock, flags);
1887 	return result;
1888 }
1889 EXPORT_SYMBOL(skb_dequeue_tail);
1890 
1891 /**
1892  *	skb_queue_purge - empty a list
1893  *	@list: list to empty
1894  *
1895  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1896  *	the list and one reference dropped. This function takes the list
1897  *	lock and is atomic with respect to other list locking functions.
1898  */
1899 void skb_queue_purge(struct sk_buff_head *list)
1900 {
1901 	struct sk_buff *skb;
1902 	while ((skb = skb_dequeue(list)) != NULL)
1903 		kfree_skb(skb);
1904 }
1905 EXPORT_SYMBOL(skb_queue_purge);
1906 
1907 /**
1908  *	skb_queue_head - queue a buffer at the list head
1909  *	@list: list to use
1910  *	@newsk: buffer to queue
1911  *
1912  *	Queue a buffer at the start of the list. This function takes the
1913  *	list lock and can be used safely with other locking &sk_buff
1914  *	functions.
1915  *
1916  *	A buffer cannot be placed on two lists at the same time.
1917  */
1918 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1919 {
1920 	unsigned long flags;
1921 
1922 	spin_lock_irqsave(&list->lock, flags);
1923 	__skb_queue_head(list, newsk);
1924 	spin_unlock_irqrestore(&list->lock, flags);
1925 }
1926 EXPORT_SYMBOL(skb_queue_head);
1927 
1928 /**
1929  *	skb_queue_tail - queue a buffer at the list tail
1930  *	@list: list to use
1931  *	@newsk: buffer to queue
1932  *
1933  *	Queue a buffer at the tail of the list. This function takes the
1934  *	list lock and can be used safely with other locking &sk_buff
1935  *	functions.
1936  *
1937  *	A buffer cannot be placed on two lists at the same time.
1938  */
1939 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1940 {
1941 	unsigned long flags;
1942 
1943 	spin_lock_irqsave(&list->lock, flags);
1944 	__skb_queue_tail(list, newsk);
1945 	spin_unlock_irqrestore(&list->lock, flags);
1946 }
1947 EXPORT_SYMBOL(skb_queue_tail);
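
/*
 * Example (editorial sketch): the locked queue helpers above are typically
 * used as a simple producer/consumer pair.  The queue below is hypothetical;
 * skb_queue_head_init() comes from <linux/skbuff.h>.
 */
static struct sk_buff_head example_rxq;

static void example_queue_init(void)
{
	skb_queue_head_init(&example_rxq);
}

/* Producer side, e.g. called from an interrupt handler. */
static void example_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);
}

/* Consumer side, e.g. from process context; drains and frees everything. */
static void example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);
}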
1948 
1949 /**
1950  *	skb_unlink	-	remove a buffer from a list
1951  *	@skb: buffer to remove
1952  *	@list: list to use
1953  *
1954  *	Remove a packet from a list. The list locks are taken and this
1955  *	function is atomic with respect to other list locked calls
1956  *
1957  *	You must know what list the SKB is on.
1958  */
1959 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1960 {
1961 	unsigned long flags;
1962 
1963 	spin_lock_irqsave(&list->lock, flags);
1964 	__skb_unlink(skb, list);
1965 	spin_unlock_irqrestore(&list->lock, flags);
1966 }
1967 EXPORT_SYMBOL(skb_unlink);
1968 
1969 /**
1970  *	skb_append	-	append a buffer
1971  *	@old: buffer to insert after
1972  *	@newsk: buffer to insert
1973  *	@list: list to use
1974  *
1975  *	Place a packet after a given packet in a list. The list locks are taken
1976  *	and this function is atomic with respect to other list locked calls.
1977  *	A buffer cannot be placed on two lists at the same time.
1978  */
1979 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1980 {
1981 	unsigned long flags;
1982 
1983 	spin_lock_irqsave(&list->lock, flags);
1984 	__skb_queue_after(list, old, newsk);
1985 	spin_unlock_irqrestore(&list->lock, flags);
1986 }
1987 EXPORT_SYMBOL(skb_append);
1988 
1989 /**
1990  *	skb_insert	-	insert a buffer
1991  *	@old: buffer to insert before
1992  *	@newsk: buffer to insert
1993  *	@list: list to use
1994  *
1995  *	Place a packet before a given packet in a list. The list locks are
1996  * 	taken and this function is atomic with respect to other list locked
1997  *	calls.
1998  *
1999  *	A buffer cannot be placed on two lists at the same time.
2000  */
2001 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2002 {
2003 	unsigned long flags;
2004 
2005 	spin_lock_irqsave(&list->lock, flags);
2006 	__skb_insert(newsk, old->prev, old, list);
2007 	spin_unlock_irqrestore(&list->lock, flags);
2008 }
2009 EXPORT_SYMBOL(skb_insert);
2010 
2011 static inline void skb_split_inside_header(struct sk_buff *skb,
2012 					   struct sk_buff* skb1,
2013 					   const u32 len, const int pos)
2014 {
2015 	int i;
2016 
2017 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2018 					 pos - len);
2019 	/* And move data appendix as is. */
2020 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2021 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2022 
2023 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2024 	skb_shinfo(skb)->nr_frags  = 0;
2025 	skb1->data_len		   = skb->data_len;
2026 	skb1->len		   += skb1->data_len;
2027 	skb->data_len		   = 0;
2028 	skb->len		   = len;
2029 	skb_set_tail_pointer(skb, len);
2030 }
2031 
2032 static inline void skb_split_no_header(struct sk_buff *skb,
2033 				       struct sk_buff* skb1,
2034 				       const u32 len, int pos)
2035 {
2036 	int i, k = 0;
2037 	const int nfrags = skb_shinfo(skb)->nr_frags;
2038 
2039 	skb_shinfo(skb)->nr_frags = 0;
2040 	skb1->len		  = skb1->data_len = skb->len - len;
2041 	skb->len		  = len;
2042 	skb->data_len		  = len - pos;
2043 
2044 	for (i = 0; i < nfrags; i++) {
2045 		int size = skb_shinfo(skb)->frags[i].size;
2046 
2047 		if (pos + size > len) {
2048 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2049 
2050 			if (pos < len) {
2051 				/* Split frag.
2052 				 * We have two variants in this case:
2053 				 * 1. Move the whole frag to the second
2054 				 *    part, if possible. E.g. this
2055 				 *    approach is mandatory for TUX,
2056 				 *    where splitting is expensive.
2057 				 * 2. Split accurately. This is what we do here.
2058 				 */
2059 				get_page(skb_shinfo(skb)->frags[i].page);
2060 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2061 				skb_shinfo(skb1)->frags[0].size -= len - pos;
2062 				skb_shinfo(skb)->frags[i].size	= len - pos;
2063 				skb_shinfo(skb)->nr_frags++;
2064 			}
2065 			k++;
2066 		} else
2067 			skb_shinfo(skb)->nr_frags++;
2068 		pos += size;
2069 	}
2070 	skb_shinfo(skb1)->nr_frags = k;
2071 }
2072 
2073 /**
2074  * skb_split - Split fragmented skb into two parts at length len.
2075  * @skb: the buffer to split
2076  * @skb1: the buffer to receive the second part
2077  * @len: new length for skb
2078  */
2079 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2080 {
2081 	int pos = skb_headlen(skb);
2082 
2083 	if (len < pos)	/* Split line is inside header. */
2084 		skb_split_inside_header(skb, skb1, len, pos);
2085 	else		/* Second chunk has no header, nothing to copy. */
2086 		skb_split_no_header(skb, skb1, len, pos);
2087 }
2088 EXPORT_SYMBOL(skb_split);
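
/*
 * Example (editorial sketch): splitting a buffer at @len, in the spirit of
 * what TCP does when it fragments a queued segment.  The second buffer only
 * needs linear room for whatever part of the header ends up in it, so
 * skb_headlen(skb) is a safe upper bound.  Error handling is minimal.
 */
static struct sk_buff *example_split(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1;

	if (len >= skb->len)
		return NULL;

	skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!skb1)
		return NULL;

	/* skb keeps the first @len bytes, skb1 receives the remainder. */
	skb_split(skb, skb1, len);
	return skb1;
}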
2089 
2090 /* Shifting from/to a cloned skb is a no-go.
2091  *
2092  * Caller cannot keep skb_shinfo related pointers past calling here!
2093  */
2094 static int skb_prepare_for_shift(struct sk_buff *skb)
2095 {
2096 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2097 }
2098 
2099 /**
2100  * skb_shift - Shifts paged data partially from skb to another
2101  * @tgt: buffer into which tail data gets added
2102  * @skb: buffer from which the paged data comes from
2103  * @shiftlen: shift up to this many bytes
2104  *
2105  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2106  * the length of the skb, from @skb to @tgt. Returns the number of bytes
2107  * shifted. It is up to the caller to free @skb if everything was shifted.
2108  *
2109  * If @tgt runs out of frags, the whole operation is aborted.
2110  *
2111  * Skb cannot include anything else but paged data while tgt is allowed
2112  * to have non-paged data as well.
2113  *
2114  * TODO: full sized shift could be optimized but that would need
2115  * specialized skb free'er to handle frags without up-to-date nr_frags.
2116  */
2117 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2118 {
2119 	int from, to, merge, todo;
2120 	struct skb_frag_struct *fragfrom, *fragto;
2121 
2122 	BUG_ON(shiftlen > skb->len);
2123 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2124 
2125 	todo = shiftlen;
2126 	from = 0;
2127 	to = skb_shinfo(tgt)->nr_frags;
2128 	fragfrom = &skb_shinfo(skb)->frags[from];
2129 
2130 	/* Actual merge is delayed until the point when we know we can
2131 	 * commit all, so that we don't have to undo partial changes
2132 	 */
2133 	if (!to ||
2134 	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2135 		merge = -1;
2136 	} else {
2137 		merge = to - 1;
2138 
2139 		todo -= fragfrom->size;
2140 		if (todo < 0) {
2141 			if (skb_prepare_for_shift(skb) ||
2142 			    skb_prepare_for_shift(tgt))
2143 				return 0;
2144 
2145 			/* All previous frag pointers might be stale! */
2146 			fragfrom = &skb_shinfo(skb)->frags[from];
2147 			fragto = &skb_shinfo(tgt)->frags[merge];
2148 
2149 			fragto->size += shiftlen;
2150 			fragfrom->size -= shiftlen;
2151 			fragfrom->page_offset += shiftlen;
2152 
2153 			goto onlymerged;
2154 		}
2155 
2156 		from++;
2157 	}
2158 
2159 	/* Skip full, not-fitting skb to avoid expensive operations */
2160 	if ((shiftlen == skb->len) &&
2161 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2162 		return 0;
2163 
2164 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2165 		return 0;
2166 
2167 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2168 		if (to == MAX_SKB_FRAGS)
2169 			return 0;
2170 
2171 		fragfrom = &skb_shinfo(skb)->frags[from];
2172 		fragto = &skb_shinfo(tgt)->frags[to];
2173 
2174 		if (todo >= fragfrom->size) {
2175 			*fragto = *fragfrom;
2176 			todo -= fragfrom->size;
2177 			from++;
2178 			to++;
2179 
2180 		} else {
2181 			get_page(fragfrom->page);
2182 			fragto->page = fragfrom->page;
2183 			fragto->page_offset = fragfrom->page_offset;
2184 			fragto->size = todo;
2185 
2186 			fragfrom->page_offset += todo;
2187 			fragfrom->size -= todo;
2188 			todo = 0;
2189 
2190 			to++;
2191 			break;
2192 		}
2193 	}
2194 
2195 	/* Ready to "commit" this state change to tgt */
2196 	skb_shinfo(tgt)->nr_frags = to;
2197 
2198 	if (merge >= 0) {
2199 		fragfrom = &skb_shinfo(skb)->frags[0];
2200 		fragto = &skb_shinfo(tgt)->frags[merge];
2201 
2202 		fragto->size += fragfrom->size;
2203 		put_page(fragfrom->page);
2204 	}
2205 
2206 	/* Reposition in the original skb */
2207 	to = 0;
2208 	while (from < skb_shinfo(skb)->nr_frags)
2209 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2210 	skb_shinfo(skb)->nr_frags = to;
2211 
2212 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2213 
2214 onlymerged:
2215 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2216 	 * the other hand might need it if it needs to be resent
2217 	 */
2218 	tgt->ip_summed = CHECKSUM_PARTIAL;
2219 	skb->ip_summed = CHECKSUM_PARTIAL;
2220 
2221 	/* Yak, is it really working this way? Some helper please? */
2222 	skb->len -= shiftlen;
2223 	skb->data_len -= shiftlen;
2224 	skb->truesize -= shiftlen;
2225 	tgt->len += shiftlen;
2226 	tgt->data_len += shiftlen;
2227 	tgt->truesize += shiftlen;
2228 
2229 	return shiftlen;
2230 }
2231 
2232 /**
2233  * skb_prepare_seq_read - Prepare a sequential read of skb data
2234  * @skb: the buffer to read
2235  * @from: lower offset of data to be read
2236  * @to: upper offset of data to be read
2237  * @st: state variable
2238  *
2239  * Initializes the specified state variable. Must be called before
2240  * invoking skb_seq_read() for the first time.
2241  */
2242 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2243 			  unsigned int to, struct skb_seq_state *st)
2244 {
2245 	st->lower_offset = from;
2246 	st->upper_offset = to;
2247 	st->root_skb = st->cur_skb = skb;
2248 	st->frag_idx = st->stepped_offset = 0;
2249 	st->frag_data = NULL;
2250 }
2251 EXPORT_SYMBOL(skb_prepare_seq_read);
2252 
2253 /**
2254  * skb_seq_read - Sequentially read skb data
2255  * @consumed: number of bytes consumed by the caller so far
2256  * @data: destination pointer for data to be returned
2257  * @st: state variable
2258  *
2259  * Reads a block of skb data at &consumed relative to the
2260  * lower offset specified to skb_prepare_seq_read(). Assigns
2261  * the head of the data block to &data and returns the length
2262  * of the block or 0 if the end of the skb data or the upper
2263  * offset has been reached.
2264  *
2265  * The caller is not required to consume all of the data
2266  * returned, i.e. &consumed is typically set to the number
2267  * of bytes already consumed and the next call to
2268  * skb_seq_read() will return the remaining part of the block.
2269  *
2270  * Note 1: The size of each block of data returned can be arbitrary,
2271  *       this limitation is the cost for zerocopy sequential
2272  *       reads of potentially non-linear data.
2273  *
2274  * Note 2: Fragment lists within fragments are not implemented
2275  *       at the moment, state->root_skb could be replaced with
2276  *       a stack for this purpose.
2277  */
2278 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2279 			  struct skb_seq_state *st)
2280 {
2281 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2282 	skb_frag_t *frag;
2283 
2284 	if (unlikely(abs_offset >= st->upper_offset))
2285 		return 0;
2286 
2287 next_skb:
2288 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2289 
2290 	if (abs_offset < block_limit && !st->frag_data) {
2291 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2292 		return block_limit - abs_offset;
2293 	}
2294 
2295 	if (st->frag_idx == 0 && !st->frag_data)
2296 		st->stepped_offset += skb_headlen(st->cur_skb);
2297 
2298 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2299 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2300 		block_limit = frag->size + st->stepped_offset;
2301 
2302 		if (abs_offset < block_limit) {
2303 			if (!st->frag_data)
2304 				st->frag_data = kmap_skb_frag(frag);
2305 
2306 			*data = (u8 *) st->frag_data + frag->page_offset +
2307 				(abs_offset - st->stepped_offset);
2308 
2309 			return block_limit - abs_offset;
2310 		}
2311 
2312 		if (st->frag_data) {
2313 			kunmap_skb_frag(st->frag_data);
2314 			st->frag_data = NULL;
2315 		}
2316 
2317 		st->frag_idx++;
2318 		st->stepped_offset += frag->size;
2319 	}
2320 
2321 	if (st->frag_data) {
2322 		kunmap_skb_frag(st->frag_data);
2323 		st->frag_data = NULL;
2324 	}
2325 
2326 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2327 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2328 		st->frag_idx = 0;
2329 		goto next_skb;
2330 	} else if (st->cur_skb->next) {
2331 		st->cur_skb = st->cur_skb->next;
2332 		st->frag_idx = 0;
2333 		goto next_skb;
2334 	}
2335 
2336 	return 0;
2337 }
2338 EXPORT_SYMBOL(skb_seq_read);
2339 
2340 /**
2341  * skb_abort_seq_read - Abort a sequential read of skb data
2342  * @st: state variable
2343  *
2344  * Must be called if the sequential read was aborted, i.e. if
2345  * skb_seq_read() was not called until it returned 0.
2346  */
2347 void skb_abort_seq_read(struct skb_seq_state *st)
2348 {
2349 	if (st->frag_data)
2350 		kunmap_skb_frag(st->frag_data);
2351 }
2352 EXPORT_SYMBOL(skb_abort_seq_read);
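
/*
 * Example (editorial sketch): walking skb data zero-copy with the
 * skb_prepare_seq_read()/skb_seq_read() pair.  The handler name is
 * hypothetical; the loop shape mirrors how skb_find_text() uses it.
 */
static void example_walk_skb(struct sk_buff *skb, unsigned int from,
			     unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0;
	unsigned int len;

	skb_prepare_seq_read(skb, from, to, &st);

	/* Each call returns the next contiguous block of data. */
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* ... inspect len bytes at data here ... */
		consumed += len;
	}

	/* Nothing to unmap here: the loop ran until skb_seq_read() returned
	 * 0.  skb_abort_seq_read(&st) is only needed when bailing out early.
	 */
}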
2353 
2354 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2355 
2356 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2357 					  struct ts_config *conf,
2358 					  struct ts_state *state)
2359 {
2360 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2361 }
2362 
2363 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2364 {
2365 	skb_abort_seq_read(TS_SKB_CB(state));
2366 }
2367 
2368 /**
2369  * skb_find_text - Find a text pattern in skb data
2370  * @skb: the buffer to look in
2371  * @from: search offset
2372  * @to: search limit
2373  * @config: textsearch configuration
2374  * @state: uninitialized textsearch state variable
2375  *
2376  * Finds a pattern in the skb data according to the specified
2377  * textsearch configuration. Use textsearch_next() to retrieve
2378  * subsequent occurrences of the pattern. Returns the offset
2379  * to the first occurrence or UINT_MAX if no match was found.
2380  */
2381 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2382 			   unsigned int to, struct ts_config *config,
2383 			   struct ts_state *state)
2384 {
2385 	unsigned int ret;
2386 
2387 	config->get_next_block = skb_ts_get_next_block;
2388 	config->finish = skb_ts_finish;
2389 
2390 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2391 
2392 	ret = textsearch_find(config, state);
2393 	return (ret <= to - from ? ret : UINT_MAX);
2394 }
2395 EXPORT_SYMBOL(skb_find_text);
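
/*
 * Example (editorial sketch): searching a packet for a byte pattern with
 * skb_find_text().  textsearch_prepare()/textsearch_destroy() come from
 * <linux/textsearch.h>; "kmp" selects the Knuth-Morris-Pratt matcher.
 * The helper name is hypothetical.
 */
static bool example_skb_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	/* Search the whole packet; UINT_MAX means no match. */
	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);

	return pos != UINT_MAX;
}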
2396 
2397 /**
2398  * skb_append_datato_frags - append the user data to an skb
2399  * @sk: sock structure
2400  * @skb: skb structure to be appended with user data
2401  * @getfrag: callback function to be used for getting the user data
2402  * @from: pointer to user message iov
2403  * @length: length of the iov message
2404  *
2405  * Description: This procedure appends the user data to the fragment part
2406  * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
2407  */
2408 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2409 			int (*getfrag)(void *from, char *to, int offset,
2410 					int len, int odd, struct sk_buff *skb),
2411 			void *from, int length)
2412 {
2413 	int frg_cnt = 0;
2414 	skb_frag_t *frag = NULL;
2415 	struct page *page = NULL;
2416 	int copy, left;
2417 	int offset = 0;
2418 	int ret;
2419 
2420 	do {
2421 		/* Return error if we don't have space for new frag */
2422 		frg_cnt = skb_shinfo(skb)->nr_frags;
2423 		if (frg_cnt >= MAX_SKB_FRAGS)
2424 			return -EFAULT;
2425 
2426 		/* allocate a new page for next frag */
2427 		page = alloc_pages(sk->sk_allocation, 0);
2428 
2429 		/* If alloc_pages() fails just return failure and the caller
2430 		 * will free previously allocated pages by doing kfree_skb()
2431 		 */
2432 		if (page == NULL)
2433 			return -ENOMEM;
2434 
2435 		/* initialize the next frag */
2436 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2437 		skb->truesize += PAGE_SIZE;
2438 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2439 
2440 		/* get the new initialized frag */
2441 		frg_cnt = skb_shinfo(skb)->nr_frags;
2442 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2443 
2444 		/* copy the user data to page */
2445 		left = PAGE_SIZE - frag->page_offset;
2446 		copy = (length > left)? left : length;
2447 
2448 		ret = getfrag(from, (page_address(frag->page) +
2449 			    frag->page_offset + frag->size),
2450 			    offset, copy, 0, skb);
2451 		if (ret < 0)
2452 			return -EFAULT;
2453 
2454 		/* copy was successful so update the size parameters */
2455 		frag->size += copy;
2456 		skb->len += copy;
2457 		skb->data_len += copy;
2458 		offset += copy;
2459 		length -= copy;
2460 
2461 	} while (length > 0);
2462 
2463 	return 0;
2464 }
2465 EXPORT_SYMBOL(skb_append_datato_frags);
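
/*
 * Example (editorial sketch): a getfrag() callback that copies from a plain
 * kernel buffer, as might be passed to skb_append_datato_frags().  Real
 * users typically copy from a user iovec instead; the names here are
 * hypothetical.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	/* @from points at the start of the source buffer; @offset is how
	 * much earlier calls have already consumed.
	 */
	memcpy(to, (char *)from + offset, len);
	return 0;
}

/* Typical call: skb_append_datato_frags(sk, skb, example_getfrag, buf, buf_len); */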
2466 
2467 /**
2468  *	skb_pull_rcsum - pull skb and update receive checksum
2469  *	@skb: buffer to update
2470  *	@len: length of data pulled
2471  *
2472  *	This function performs an skb_pull on the packet and updates
2473  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2474  *	receive path processing instead of skb_pull unless you know
2475  *	that the checksum difference is zero (e.g., a valid IP header)
2476  *	or you are setting ip_summed to CHECKSUM_NONE.
2477  */
2478 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2479 {
2480 	BUG_ON(len > skb->len);
2481 	skb->len -= len;
2482 	BUG_ON(skb->len < skb->data_len);
2483 	skb_postpull_rcsum(skb, skb->data, len);
2484 	return skb->data += len;
2485 }
2486 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
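
/*
 * Example (editorial sketch): stripping an outer header on the receive path
 * while keeping a CHECKSUM_COMPLETE value consistent.  The header length is
 * hypothetical; this mirrors how tunnel and VLAN receive paths use it.
 */
static int example_strip_outer_header(struct sk_buff *skb, unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	/* Advances skb->data by hlen and subtracts the pulled bytes from
	 * skb->csum when ip_summed == CHECKSUM_COMPLETE.
	 */
	skb_pull_rcsum(skb, hlen);
	return 0;
}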
2487 
2488 /**
2489  *	skb_segment - Perform protocol segmentation on skb.
2490  *	@skb: buffer to segment
2491  *	@features: features for the output path (see dev->features)
2492  *
2493  *	This function performs segmentation on the given skb.  It returns
2494  *	a pointer to the first in a list of new skbs for the segments.
2495  *	In case of error it returns ERR_PTR(err).
2496  */
2497 struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
2498 {
2499 	struct sk_buff *segs = NULL;
2500 	struct sk_buff *tail = NULL;
2501 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2502 	unsigned int mss = skb_shinfo(skb)->gso_size;
2503 	unsigned int doffset = skb->data - skb_mac_header(skb);
2504 	unsigned int offset = doffset;
2505 	unsigned int headroom;
2506 	unsigned int len;
2507 	int sg = !!(features & NETIF_F_SG);
2508 	int nfrags = skb_shinfo(skb)->nr_frags;
2509 	int err = -ENOMEM;
2510 	int i = 0;
2511 	int pos;
2512 
2513 	__skb_push(skb, doffset);
2514 	headroom = skb_headroom(skb);
2515 	pos = skb_headlen(skb);
2516 
2517 	do {
2518 		struct sk_buff *nskb;
2519 		skb_frag_t *frag;
2520 		int hsize;
2521 		int size;
2522 
2523 		len = skb->len - offset;
2524 		if (len > mss)
2525 			len = mss;
2526 
2527 		hsize = skb_headlen(skb) - offset;
2528 		if (hsize < 0)
2529 			hsize = 0;
2530 		if (hsize > len || !sg)
2531 			hsize = len;
2532 
2533 		if (!hsize && i >= nfrags) {
2534 			BUG_ON(fskb->len != len);
2535 
2536 			pos += len;
2537 			nskb = skb_clone(fskb, GFP_ATOMIC);
2538 			fskb = fskb->next;
2539 
2540 			if (unlikely(!nskb))
2541 				goto err;
2542 
2543 			hsize = skb_end_pointer(nskb) - nskb->head;
2544 			if (skb_cow_head(nskb, doffset + headroom)) {
2545 				kfree_skb(nskb);
2546 				goto err;
2547 			}
2548 
2549 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2550 					  hsize;
2551 			skb_release_head_state(nskb);
2552 			__skb_push(nskb, doffset);
2553 		} else {
2554 			nskb = alloc_skb(hsize + doffset + headroom,
2555 					 GFP_ATOMIC);
2556 
2557 			if (unlikely(!nskb))
2558 				goto err;
2559 
2560 			skb_reserve(nskb, headroom);
2561 			__skb_put(nskb, doffset);
2562 		}
2563 
2564 		if (segs)
2565 			tail->next = nskb;
2566 		else
2567 			segs = nskb;
2568 		tail = nskb;
2569 
2570 		__copy_skb_header(nskb, skb);
2571 		nskb->mac_len = skb->mac_len;
2572 
2573 		/* nskb and skb might have different headroom */
2574 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2575 			nskb->csum_start += skb_headroom(nskb) - headroom;
2576 
2577 		skb_reset_mac_header(nskb);
2578 		skb_set_network_header(nskb, skb->mac_len);
2579 		nskb->transport_header = (nskb->network_header +
2580 					  skb_network_header_len(skb));
2581 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2582 
2583 		if (fskb != skb_shinfo(skb)->frag_list)
2584 			continue;
2585 
2586 		if (!sg) {
2587 			nskb->ip_summed = CHECKSUM_NONE;
2588 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2589 							    skb_put(nskb, len),
2590 							    len, 0);
2591 			continue;
2592 		}
2593 
2594 		frag = skb_shinfo(nskb)->frags;
2595 
2596 		skb_copy_from_linear_data_offset(skb, offset,
2597 						 skb_put(nskb, hsize), hsize);
2598 
2599 		while (pos < offset + len && i < nfrags) {
2600 			*frag = skb_shinfo(skb)->frags[i];
2601 			get_page(frag->page);
2602 			size = frag->size;
2603 
2604 			if (pos < offset) {
2605 				frag->page_offset += offset - pos;
2606 				frag->size -= offset - pos;
2607 			}
2608 
2609 			skb_shinfo(nskb)->nr_frags++;
2610 
2611 			if (pos + size <= offset + len) {
2612 				i++;
2613 				pos += size;
2614 			} else {
2615 				frag->size -= pos + size - (offset + len);
2616 				goto skip_fraglist;
2617 			}
2618 
2619 			frag++;
2620 		}
2621 
2622 		if (pos < offset + len) {
2623 			struct sk_buff *fskb2 = fskb;
2624 
2625 			BUG_ON(pos + fskb->len != offset + len);
2626 
2627 			pos += fskb->len;
2628 			fskb = fskb->next;
2629 
2630 			if (fskb2->next) {
2631 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2632 				if (!fskb2)
2633 					goto err;
2634 			} else
2635 				skb_get(fskb2);
2636 
2637 			SKB_FRAG_ASSERT(nskb);
2638 			skb_shinfo(nskb)->frag_list = fskb2;
2639 		}
2640 
2641 skip_fraglist:
2642 		nskb->data_len = len - hsize;
2643 		nskb->len += nskb->data_len;
2644 		nskb->truesize += nskb->data_len;
2645 	} while ((offset += len) < skb->len);
2646 
2647 	return segs;
2648 
2649 err:
2650 	while ((skb = segs)) {
2651 		segs = skb->next;
2652 		kfree_skb(skb);
2653 	}
2654 	return ERR_PTR(err);
2655 }
2656 EXPORT_SYMBOL_GPL(skb_segment);
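
/*
 * Example (editorial sketch): the typical shape of a protocol gso_segment()
 * handler built on skb_segment(); the per-segment header fixup is left as a
 * stub.  Real callers (e.g. TCP) adjust their own headers on every segment.
 */
static struct sk_buff *example_gso_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs, *seg;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return segs;

	/* Walk the singly linked list of new segments via ->next. */
	for (seg = segs; seg; seg = seg->next) {
		/* ... fix up per-segment protocol headers here ... */
	}

	return segs;
}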
2657 
2658 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2659 {
2660 	struct sk_buff *p = *head;
2661 	struct sk_buff *nskb;
2662 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2663 	struct skb_shared_info *pinfo = skb_shinfo(p);
2664 	unsigned int headroom;
2665 	unsigned int len = skb_gro_len(skb);
2666 	unsigned int offset = skb_gro_offset(skb);
2667 	unsigned int headlen = skb_headlen(skb);
2668 
2669 	if (p->len + len >= 65536)
2670 		return -E2BIG;
2671 
2672 	if (pinfo->frag_list)
2673 		goto merge;
2674 	else if (headlen <= offset) {
2675 		skb_frag_t *frag;
2676 		skb_frag_t *frag2;
2677 		int i = skbinfo->nr_frags;
2678 		int nr_frags = pinfo->nr_frags + i;
2679 
2680 		offset -= headlen;
2681 
2682 		if (nr_frags > MAX_SKB_FRAGS)
2683 			return -E2BIG;
2684 
2685 		pinfo->nr_frags = nr_frags;
2686 		skbinfo->nr_frags = 0;
2687 
2688 		frag = pinfo->frags + nr_frags;
2689 		frag2 = skbinfo->frags + i;
2690 		do {
2691 			*--frag = *--frag2;
2692 		} while (--i);
2693 
2694 		frag->page_offset += offset;
2695 		frag->size -= offset;
2696 
2697 		skb->truesize -= skb->data_len;
2698 		skb->len -= skb->data_len;
2699 		skb->data_len = 0;
2700 
2701 		NAPI_GRO_CB(skb)->free = 1;
2702 		goto done;
2703 	} else if (skb_gro_len(p) != pinfo->gso_size)
2704 		return -E2BIG;
2705 
2706 	headroom = skb_headroom(p);
2707 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2708 	if (unlikely(!nskb))
2709 		return -ENOMEM;
2710 
2711 	__copy_skb_header(nskb, p);
2712 	nskb->mac_len = p->mac_len;
2713 
2714 	skb_reserve(nskb, headroom);
2715 	__skb_put(nskb, skb_gro_offset(p));
2716 
2717 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2718 	skb_set_network_header(nskb, skb_network_offset(p));
2719 	skb_set_transport_header(nskb, skb_transport_offset(p));
2720 
2721 	__skb_pull(p, skb_gro_offset(p));
2722 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2723 	       p->data - skb_mac_header(p));
2724 
2725 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2726 	skb_shinfo(nskb)->frag_list = p;
2727 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2728 	pinfo->gso_size = 0;
2729 	skb_header_release(p);
2730 	nskb->prev = p;
2731 
2732 	nskb->data_len += p->len;
2733 	nskb->truesize += p->len;
2734 	nskb->len += p->len;
2735 
2736 	*head = nskb;
2737 	nskb->next = p->next;
2738 	p->next = NULL;
2739 
2740 	p = nskb;
2741 
2742 merge:
2743 	if (offset > headlen) {
2744 		unsigned int eat = offset - headlen;
2745 
2746 		skbinfo->frags[0].page_offset += eat;
2747 		skbinfo->frags[0].size -= eat;
2748 		skb->data_len -= eat;
2749 		skb->len -= eat;
2750 		offset = headlen;
2751 	}
2752 
2753 	__skb_pull(skb, offset);
2754 
2755 	p->prev->next = skb;
2756 	p->prev = skb;
2757 	skb_header_release(skb);
2758 
2759 done:
2760 	NAPI_GRO_CB(p)->count++;
2761 	p->data_len += len;
2762 	p->truesize += len;
2763 	p->len += len;
2764 
2765 	NAPI_GRO_CB(skb)->same_flow = 1;
2766 	return 0;
2767 }
2768 EXPORT_SYMBOL_GPL(skb_gro_receive);
2769 
2770 void __init skb_init(void)
2771 {
2772 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2773 					      sizeof(struct sk_buff),
2774 					      0,
2775 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2776 					      NULL);
2777 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2778 						(2*sizeof(struct sk_buff)) +
2779 						sizeof(atomic_t),
2780 						0,
2781 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2782 						NULL);
2783 }
2784 
2785 /**
2786  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2787  *	@skb: Socket buffer containing the buffers to be mapped
2788  *	@sg: The scatter-gather list to map into
2789  *	@offset: The offset into the buffer's contents to start mapping
2790  *	@len: Length of buffer space to be mapped
2791  *
2792  *	Fill the specified scatter-gather list with mappings/pointers into a
2793  *	region of the buffer space attached to a socket buffer.
2794  */
2795 static int
2796 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2797 {
2798 	int start = skb_headlen(skb);
2799 	int i, copy = start - offset;
2800 	struct sk_buff *frag_iter;
2801 	int elt = 0;
2802 
2803 	if (copy > 0) {
2804 		if (copy > len)
2805 			copy = len;
2806 		sg_set_buf(sg, skb->data + offset, copy);
2807 		elt++;
2808 		if ((len -= copy) == 0)
2809 			return elt;
2810 		offset += copy;
2811 	}
2812 
2813 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2814 		int end;
2815 
2816 		WARN_ON(start > offset + len);
2817 
2818 		end = start + skb_shinfo(skb)->frags[i].size;
2819 		if ((copy = end - offset) > 0) {
2820 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2821 
2822 			if (copy > len)
2823 				copy = len;
2824 			sg_set_page(&sg[elt], frag->page, copy,
2825 					frag->page_offset+offset-start);
2826 			elt++;
2827 			if (!(len -= copy))
2828 				return elt;
2829 			offset += copy;
2830 		}
2831 		start = end;
2832 	}
2833 
2834 	skb_walk_frags(skb, frag_iter) {
2835 		int end;
2836 
2837 		WARN_ON(start > offset + len);
2838 
2839 		end = start + frag_iter->len;
2840 		if ((copy = end - offset) > 0) {
2841 			if (copy > len)
2842 				copy = len;
2843 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2844 					      copy);
2845 			if ((len -= copy) == 0)
2846 				return elt;
2847 			offset += copy;
2848 		}
2849 		start = end;
2850 	}
2851 	BUG_ON(len);
2852 	return elt;
2853 }
2854 
2855 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2856 {
2857 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2858 
2859 	sg_mark_end(&sg[nsg - 1]);
2860 
2861 	return nsg;
2862 }
2863 EXPORT_SYMBOL_GPL(skb_to_sgvec);
2864 
2865 /**
2866  *	skb_cow_data - Check that a socket buffer's data buffers are writable
2867  *	@skb: The socket buffer to check.
2868  *	@tailbits: Amount of trailing space to be added
2869  *	@trailer: Returned pointer to the skb where the @tailbits space begins
2870  *
2871  *	Make sure that the data buffers attached to a socket buffer are
2872  *	writable. If they are not, private copies are made of the data buffers
2873  *	and the socket buffer is set to use these instead.
2874  *
2875  *	If @tailbits is given, make sure that there is space to write @tailbits
2876  *	bytes of data beyond current end of socket buffer.  @trailer will be
2877  *	set to point to the skb in which this space begins.
2878  *
2879  *	The number of scatterlist elements required to completely map the
2880  *	COW'd and extended socket buffer will be returned.
2881  */
2882 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2883 {
2884 	int copyflag;
2885 	int elt;
2886 	struct sk_buff *skb1, **skb_p;
2887 
2888 	/* If skb is cloned or its head is paged, reallocate
2889 	 * head pulling out all the pages (pages are considered not writable
2890 	 * at the moment even if they are anonymous).
2891 	 */
2892 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2893 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2894 		return -ENOMEM;
2895 
2896 	/* Easy case. Most of packets will go this way. */
2897 	if (!skb_has_frag_list(skb)) {
2898 		/* A little trouble: not enough space for the trailer.
2899 		 * This should not happen when the stack is tuned to generate
2900 		 * good frames. OK, on a miss we reallocate and reserve even
2901 		 * more space; 128 bytes is fair. */
2902 
2903 		if (skb_tailroom(skb) < tailbits &&
2904 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2905 			return -ENOMEM;
2906 
2907 		/* Voila! */
2908 		*trailer = skb;
2909 		return 1;
2910 	}
2911 
2912 	/* Misery. We are in trouble; going to mince the fragments... */
2913 
2914 	elt = 1;
2915 	skb_p = &skb_shinfo(skb)->frag_list;
2916 	copyflag = 0;
2917 
2918 	while ((skb1 = *skb_p) != NULL) {
2919 		int ntail = 0;
2920 
2921 		/* The fragment is partially pulled by someone,
2922 		 * this can happen on input. Copy it and everything
2923 		 * after it. */
2924 
2925 		if (skb_shared(skb1))
2926 			copyflag = 1;
2927 
2928 		/* If the skb is the last, worry about trailer. */
2929 
2930 		if (skb1->next == NULL && tailbits) {
2931 			if (skb_shinfo(skb1)->nr_frags ||
2932 			    skb_has_frag_list(skb1) ||
2933 			    skb_tailroom(skb1) < tailbits)
2934 				ntail = tailbits + 128;
2935 		}
2936 
2937 		if (copyflag ||
2938 		    skb_cloned(skb1) ||
2939 		    ntail ||
2940 		    skb_shinfo(skb1)->nr_frags ||
2941 		    skb_has_frag_list(skb1)) {
2942 			struct sk_buff *skb2;
2943 
2944 			/* Fuck, we are miserable poor guys... */
2945 			if (ntail == 0)
2946 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2947 			else
2948 				skb2 = skb_copy_expand(skb1,
2949 						       skb_headroom(skb1),
2950 						       ntail,
2951 						       GFP_ATOMIC);
2952 			if (unlikely(skb2 == NULL))
2953 				return -ENOMEM;
2954 
2955 			if (skb1->sk)
2956 				skb_set_owner_w(skb2, skb1->sk);
2957 
2958 			/* Looking around. Are we still alive?
2959 			 * OK, link new skb, drop old one */
2960 
2961 			skb2->next = skb1->next;
2962 			*skb_p = skb2;
2963 			kfree_skb(skb1);
2964 			skb1 = skb2;
2965 		}
2966 		elt++;
2967 		*trailer = skb1;
2968 		skb_p = &skb1->next;
2969 	}
2970 
2971 	return elt;
2972 }
2973 EXPORT_SYMBOL_GPL(skb_cow_data);
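
/*
 * Example (editorial sketch): the usual pairing of skb_cow_data() and
 * skb_to_sgvec(), as done by the IPsec transforms: make the buffers
 * writable and get an upper bound on the scatterlist size, then map the
 * payload.  Trailer handling and the actual crypto call are omitted.
 */
static int example_map_payload(struct sk_buff *skb)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, mapped;

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0)
		return nfrags;

	sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nfrags);
	mapped = skb_to_sgvec(skb, sg, 0, skb->len);

	/* ... hand sg/mapped to the crypto layer here ... */

	kfree(sg);
	return mapped;
}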
2974 
2975 static void sock_rmem_free(struct sk_buff *skb)
2976 {
2977 	struct sock *sk = skb->sk;
2978 
2979 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2980 }
2981 
2982 /*
2983  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
2984  */
2985 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2986 {
2987 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2988 	    (unsigned)sk->sk_rcvbuf)
2989 		return -ENOMEM;
2990 
2991 	skb_orphan(skb);
2992 	skb->sk = sk;
2993 	skb->destructor = sock_rmem_free;
2994 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2995 
2996 	skb_queue_tail(&sk->sk_error_queue, skb);
2997 	if (!sock_flag(sk, SOCK_DEAD))
2998 		sk->sk_data_ready(sk, skb->len);
2999 	return 0;
3000 }
3001 EXPORT_SYMBOL(sock_queue_err_skb);
3002 
3003 void skb_tstamp_tx(struct sk_buff *orig_skb,
3004 		struct skb_shared_hwtstamps *hwtstamps)
3005 {
3006 	struct sock *sk = orig_skb->sk;
3007 	struct sock_exterr_skb *serr;
3008 	struct sk_buff *skb;
3009 	int err;
3010 
3011 	if (!sk)
3012 		return;
3013 
3014 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3015 	if (!skb)
3016 		return;
3017 
3018 	if (hwtstamps) {
3019 		*skb_hwtstamps(skb) =
3020 			*hwtstamps;
3021 	} else {
3022 		/*
3023 		 * no hardware time stamps available,
3024 		 * so keep the shared tx_flags and only
3025 		 * store software time stamp
3026 		 */
3027 		skb->tstamp = ktime_get_real();
3028 	}
3029 
3030 	serr = SKB_EXT_ERR(skb);
3031 	memset(serr, 0, sizeof(*serr));
3032 	serr->ee.ee_errno = ENOMSG;
3033 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3034 
3035 	err = sock_queue_err_skb(sk, skb);
3036 
3037 	if (err)
3038 		kfree_skb(skb);
3039 }
3040 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
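
/*
 * Example (editorial sketch): how a driver with hardware TX timestamping
 * might report a completed timestamp back to the socket error queue.  The
 * raw nanosecond value and helper name are hypothetical.
 */
static void example_report_tx_tstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(ns);

	/* Clones the skb and queues it on skb->sk's error queue. */
	skb_tstamp_tx(skb, &hwts);
}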
3041 
3042 
3043 /**
3044  * skb_partial_csum_set - set up and verify partial csum values for packet
3045  * @skb: the skb to set
3046  * @start: the number of bytes after skb->data to start checksumming.
3047  * @off: the offset from start to place the checksum.
3048  *
3049  * For untrusted partially-checksummed packets, we need to make sure the values
3050  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3051  *
3052  * This function checks and sets those values and skb->ip_summed: if this
3053  * returns false you should drop the packet.
3054  */
3055 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3056 {
3057 	if (unlikely(start > skb_headlen(skb)) ||
3058 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3059 		if (net_ratelimit())
3060 			printk(KERN_WARNING
3061 			       "bad partial csum: csum=%u/%u len=%u\n",
3062 			       start, off, skb_headlen(skb));
3063 		return false;
3064 	}
3065 	skb->ip_summed = CHECKSUM_PARTIAL;
3066 	skb->csum_start = skb_headroom(skb) + start;
3067 	skb->csum_offset = off;
3068 	return true;
3069 }
3070 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
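
/*
 * Example (editorial sketch): validating checksum offload metadata received
 * from an untrusted source, much as the virtio-net receive path does.  The
 * csum_start/csum_offset values are hypothetical, e.g. parsed from a packet
 * header supplied by a guest.
 */
static int example_setup_partial_csum(struct sk_buff *skb,
				      u16 csum_start, u16 csum_offset)
{
	/* Rejects offsets that would point outside the linear header. */
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;

	return 0;
}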
3071 
3072 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3073 {
3074 	if (net_ratelimit())
3075 		pr_warning("%s: received packets cannot be forwarded"
3076 			   " while LRO is enabled\n", skb->dev->name);
3077 }
3078 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3079