// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>
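
/* Registration state of an xdp_rxq_info. A queue starts as NEW (zeroed),
 * moves to REGISTERED via __xdp_rxq_info_reg(), back to UNREGISTERED via
 * xdp_rxq_info_unreg(), or is marked UNUSED via xdp_rxq_info_unused() when
 * the driver promises never to register it.
 */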
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
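
/* Typical driver call sequence (illustrative sketch only; rxq, netdev and
 * pp are hypothetical driver-local variables, xdp_rxq_info_reg() is the
 * inline wrapper declared in <net/xdp.h>):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, queue_index, napi_id);
 *	if (!err)
 *		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *						 MEM_TYPE_PAGE_POOL, pp);
 *	...
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);	(on queue teardown)
 */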

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
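
/* Illustrative usage sketch (not taken from any specific driver; frames[]
 * and count are hypothetical caller state, xdp_frame_bulk_init() is the
 * helper declared in <net/xdp.h>):
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (i = 0; i < count; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */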
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared, to save a lookup if the next bulk has the same mem.id */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
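
/* Illustrative pairing with __xdp_build_skb_from_frame() below (hypothetical
 * caller; BATCH, frames[], n and dev are assumed caller state, not part of
 * this file):
 *
 *	void *skbs[BATCH];
 *
 *	if (unlikely(xdp_alloc_skb_bulk(skbs, n, GFP_ATOMIC)))
 *		return;
 *	for (i = 0; i < n; i++)
 *		skb = __xdp_build_skb_from_frame(frames[i], skbs[i], dev);
 */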

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}