// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
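
/* Registered allocators are kept in an rhashtable keyed by mem.id, so that
 * the bulk return path (xdp_return_frame_bulk()) and the teardown path can
 * map an xdp_mem_info back to its backing allocator without holding any
 * reference on the RX queue that registered it.
 */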

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

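/* Disconnect callback handed to the page_pool via page_pool_use_xdp_mem():
 * it runs when the pool is being released and removes every rhashtable
 * entry that still points at this allocator, freeing the entries after an
 * RCU grace period.
 */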
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
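
/* Teardown note: xdp_rxq_info_unreg() above also tears down the memory
 * model; for MEM_TYPE_PAGE_POOL that ends up in page_pool_destroy() via
 * xdp_unreg_mem_model(), so a driver's queue-destroy path normally only
 * needs the single xdp_rxq_info_unreg() call.
 */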

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
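
/* A rough sketch of the expected driver-side setup, assuming rxq->xdp_rxq,
 * rxq->idx, rxq->napi.napi_id and rxq->page_pool are illustrative driver
 * fields (not defined in this file):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, dev, rxq->idx,
 *			       rxq->napi.napi_id);
 *	if (err)
 *		return err;
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 *					 rxq->page_pool);
 *	if (err)
 *		xdp_rxq_info_unreg(&rxq->xdp_rxq);
 */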

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling of
 * xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
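
/* Callers outside NAPI context (e.g. a TX completion path freeing frames
 * that were sent via ndo_xdp_xmit()) should use xdp_return_frame(); the
 * _rx_napi variant below may only be used while NAPI/softirq protection is
 * guaranteed, since it can recycle pages into the page_pool's lockless
 * direct cache.
 */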

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
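
/* A minimal usage sketch (names like "frames" and "n" are illustrative):
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();	// needed for the mem_id_ht lookups below
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */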
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
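
/* The conversion above is needed when an AF_XDP zero-copy (XSK) buffer has
 * to leave its umem, e.g. when a frame is redirected away from the
 * zero-copy path; the data is copied into a freshly allocated order-0 page
 * and the original xsk buffer is returned to its pool.
 */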

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

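/* Bulk-allocate empty skbs (e.g. for cpumap's xdp_frame to skb conversion);
 * the skbs still need to be initialised by __xdp_build_skb_from_frame().
 */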
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
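
/* These helpers are what paths like cpumap and veth use to hand an
 * XDP_REDIRECT'ed frame to the regular network stack: allocate (or
 * bulk-allocate) an skb, wrap the xdp_frame memory with it, then pass the
 * result to napi_gro_receive()/netif_receive_skb() as usual.
 */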

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

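/* Copy an xdp_frame (headroom, data and the xdp_frame struct itself) into a
 * newly allocated order-0 page. Used by redirect paths that need a private
 * copy of the frame, e.g. devmap broadcast (BPF_F_BROADCAST).
 */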
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value. The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

__diag_pop();
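
/* The two kfuncs above are only fallback implementations. Drivers that can
 * provide the metadata override them through their netdev's
 * xdp_metadata_ops, roughly along these lines (mydrv_* names are
 * illustrative):
 *
 *	static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
 *		.xmo_rx_timestamp	= mydrv_xdp_rx_timestamp,
 *		.xmo_rx_hash		= mydrv_xdp_rx_hash,
 *	};
 *	...
 *	netdev->xdp_metadata_ops = &mydrv_xdp_metadata_ops;
 */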

BTF_SET8_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_SET8_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);
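
/* Typical driver usage at probe/open time (the flags shown are just one
 * plausible combination, not a recommendation):
 *
 *	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC |
 *				      NETDEV_XDP_ACT_REDIRECT |
 *				      NETDEV_XDP_ACT_RX_SG);
 *	// or, when ndo_xdp_xmit() is supported:
 *	xdp_features_set_redirect_target(netdev, true);
 */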

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);