// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
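
/*
 * Example usage (illustrative sketch only, not part of this file; the
 * protocol and handler names below are hypothetical): a protocol
 * registers a static packet_offload once at init time.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.priority = 0,
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 *
 * The list is kept sorted by ascending ->priority, so handlers with a
 * lower priority value are tried first on receive.
 */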

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers and must not be
 *	freed until after all the CPUs have gone through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
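
/*
 * Example teardown (illustrative sketch, hypothetical name), matching
 * the registration sketched above.  Because dev_remove_offload() calls
 * synchronize_net(), it may sleep and must not be used from atomic
 * context:
 *
 *	dev_remove_offload(&foo_offload);
 */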
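/* Attempt to merge skb into the existing GRO packet p.  Depending on
 * the layout of skb, its payload is appended to p either as page
 * fragments, as a fragment pointing at a stolen head, or by chaining
 * skb on p's frag_list.  Returns 0 on success or a negative errno when
 * the two packets cannot be coalesced.
 */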
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
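/* Hand a fully aggregated skb up the stack: call the matching
 * gro_complete() callback to finalize headers and checksums, then
 * queue the skb on the GRO_NORMAL batch via gro_normal_one().
 * Single-segment skbs skip the completion callback.
 */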
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

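/* Flush one hash chain, oldest packets first.  When flush_old is set,
 * packets whose age matches the current jiffy are left in place so that
 * freshly started flows can keep aggregating.
 */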
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

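/* Walk one bucket list and mark which held packets belong to the same
 * flow as skb, comparing the rx hash, device, vlan tag, metadata and
 * MAC header (plus a few rarely used fields when slow_gro is set).
 */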
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower paths, so
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

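/* Prime the GRO fast path: when the packet has no linear data and its
 * first page fragment is directly addressable, expose that fragment
 * through frag0/frag0_len so headers can be parsed without pulling
 * data into the linear area first.
 */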
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

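/* Copy 'grow' bytes from the frag0 area into the linear part of skb,
 * shrinking the first fragment accordingly (and dropping it once it
 * becomes empty).  Used after GRO has parsed the headers out of frag0.
 */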
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

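/* The chain for this hash bucket is full: complete the oldest entry
 * (the tail of the list) to make room for the packet being added.
 */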
static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with the chain length >= MAX_GRO_SKBS, so the list
	 * cannot be empty and 'oldest' cannot be NULL.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

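/* Core GRO entry point for one packet: look up the protocol's offload
 * callbacks, try to merge skb with a packet already held in the
 * matching hash bucket, and report whether skb was merged, held for
 * further aggregation, or should be passed up the stack unmodified.
 */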
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

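/* Lookup helpers used by encapsulation offloads (e.g. from their own
 * gro_receive/gro_complete handlers) to find the inner protocol's
 * callbacks.  They walk an RCU-protected list, so callers are expected
 * to be inside an RCU read-side section, as the GRO path already is.
 */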
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

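/* Driver-facing GRO entry point for a fully built skb.  The verdict
 * from dev_gro_receive() is turned by napi_skb_finish() into either
 * delivery through the GRO_NORMAL batch or freeing/recycling of skb.
 */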
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

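/* Recycle an skb obtained from napi_get_frags() once its fragments have
 * been consumed (or the merge attempt failed), resetting its state so
 * the driver can reuse it for the next packet.  pfmemalloc skbs are
 * freed instead of being reused.
 */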
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

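/* Return the per-NAPI scratch skb used by the napi_gro_frags() receive
 * path, allocating one if necessary.  Drivers attach page fragments to
 * this skb and then hand it back through napi_gro_frags().
 */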
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

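/* Driver-facing GRO entry point for packets built from page fragments
 * via napi_get_frags().  napi_frags_skb() parses the ethernet header
 * out of frag0 before the packet is run through dev_gro_receive().
 */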
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);