// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
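
/* Illustrative sketch (not part of this file): how a hypothetical protocol
 * module might pair dev_add_offload() with dev_remove_offload().  ETH_P_FOO
 * and the foo_* callbacks are made-up names used only for illustration.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gso_segment = foo_gso_segment,
 *			.gro_receive = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		// sleeps in synchronize_net(), so foo_offload may be
 *		// freed or reused once this returns
 *		dev_remove_offload(&foo_offload);
 *	}
 */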

/**
 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
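
/* Illustrative sketch: an encapsulation gso_segment callback can defer to
 * skb_eth_gso_segment() for the inner protocol once the outer header has
 * been pulled.  inner_proto is an assumed local variable holding the inner
 * Ethernet protocol ID.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_eth_gso_segment(skb, features, inner_proto);
 *	if (IS_ERR(segs))
 *		return segs;	// no offload registered, or it failed
 */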

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

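
/* Merge skb into the tail of the already-held packet p.  The payload is
 * taken over as page fragments when possible (including stealing a
 * page-backed head), otherwise the whole skb is chained onto p's
 * frag_list.  Returns 0 on success or -E2BIG when the merged packet
 * would exceed the device's GRO size limits.
 */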
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (p->protocol != htons(ETH_P_IPV6) ||
		    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
		    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

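
/* Deliver a held/aggregated packet to the stack: run the protocol's
 * gro_complete callback to finalize headers (skipped when only one
 * segment was accumulated), then batch it via gro_normal_one().  The
 * packet is dropped if the callback fails.
 */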
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age;
 * the youngest packets sit at the head of each list.
 * Complete skbs in reverse order to reduce latencies.
 */
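/* Illustrative walk: with gro_bitmask == 0b100100 (buckets 2 and 5
 * non-empty), ffs() returns 3, so base wraps from ~0U to 2 and bucket 2
 * is flushed; the mask shifted to 0b100 yields ffs() == 3 again, base
 * advances to 5, bucket 5 is flushed, and the loop ends on an empty mask.
 */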
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

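
/* Walk one hash bucket and mark each held packet as same_flow (a merge
 * candidate for skb) or not, by comparing the rx hash, device, VLAN tag,
 * metadata and MAC header; the rarely needed sk/dst/conntrack/tc-ext
 * comparisons are gated behind the slow_gro bit.
 */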
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

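
/* Prime the GRO control block: if the linear area is empty and the first
 * page fragment is directly addressable (not highmem) and suitably
 * aligned, expose it as frag0 so later header accesses can avoid pulling
 * data into the linear area.
 */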
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

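
/* Core GRO entry point: match skb against the packets already held in
 * its hash bucket, hand it to the protocol's gro_receive callback, and
 * decide whether it was merged, should be held for later merging, or
 * must go up the stack as a normal packet.
 */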
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
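
/* Illustrative sketch: an encapsulation gro_receive handler can use these
 * lookups to chase the inner protocol, much like eth_gro_receive() does.
 * proto is an assumed local variable holding the inner Ethernet protocol ID.
 *
 *	struct packet_offload *ptype;
 *	struct sk_buff *pp = NULL;
 *
 *	rcu_read_lock();
 *	ptype = gro_find_receive_by_type(proto);
 *	if (ptype)
 *		pp = ptype->callbacks.gro_receive(head, skb);
 *	rcu_read_unlock();
 */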

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
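
/* Illustrative sketch: a typical driver NAPI poll loop feeding received
 * buffers into GRO.  foo_poll() and foo_rx_next_skb() are made-up
 * stand-ins for driver-specific ring handling.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = 0;
 *		struct sk_buff *skb;
 *
 *		while (work_done < budget &&
 *		       (skb = foo_rx_next_skb(napi)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work_done++;
 *		}
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */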

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
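
/* Illustrative sketch: the frag-based RX path used by drivers that receive
 * into pages rather than linear buffers.  foo_rx_page(), foo_rx_offset()
 * and foo_rx_len() are made-up stand-ins for driver ring state.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;		// out of memory, drop or recycle the page
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, foo_rx_page(),
 *			foo_rx_offset(), foo_rx_len(), PAGE_SIZE);
 *	napi_gro_frags(napi);	// consumes napi->skb
 */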

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

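
/* Illustrative sketch: how a protocol gro_receive handler typically drives
 * checksum validation, modeled on the TCP pattern.  The
 * skb_gro_checksum_validate() helper falls back to
 * __skb_gro_checksum_complete() when no hardware checksum can be used.
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;	// bad checksum, don't merge
 *		return NULL;
 *	}
 */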