/linux-6.6.21/include/net/ |
D | xdp.h |
     91  static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)  in xdp_buff_has_frags() argument
     93  return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);  in xdp_buff_has_frags()
     96  static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)  in xdp_buff_set_frags_flag() argument
     98  xdp->flags |= XDP_FLAGS_HAS_FRAGS;  in xdp_buff_set_frags_flag()
    101  static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)  in xdp_buff_clear_frags_flag() argument
    103  xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;  in xdp_buff_clear_frags_flag()
    106  static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)  in xdp_buff_is_frag_pfmemalloc() argument
    108  return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);  in xdp_buff_is_frag_pfmemalloc()
    111  static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)  in xdp_buff_set_frag_pfmemalloc() argument
    113  xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;  in xdp_buff_set_frag_pfmemalloc()
    [all …]
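These flag helpers gate access to the skb_shared_info frag array stored at the tail of the buffer. Below is a minimal sketch of how a consumer might walk that array; count_xdp_bytes() is a made-up name for illustration, and the in-tree xdp_get_buff_len() already follows the same shape.

#include <net/xdp.h>

/* Hypothetical helper (not from the tree): total payload length of an
 * xdp_buff, walking the tail frags only when XDP_FLAGS_HAS_FRAGS is set.
 */
static unsigned int count_xdp_bytes(struct xdp_buff *xdp)
{
        unsigned int len = xdp->data_end - xdp->data;
        struct skb_shared_info *sinfo;
        int i;

        if (!xdp_buff_has_frags(xdp))   /* linear buffer, nothing else to add */
                return len;

        sinfo = xdp_get_shared_info_from_buff(xdp);
        for (i = 0; i < sinfo->nr_frags; i++)
                len += skb_frag_size(&sinfo->frags[i]);

        return len;
}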
|
D | xdp_sock_drv.h |
     53  return pool->heads[0].xdp.rxq->napi_id;  in xsk_pool_get_napi_id()
     73  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)  in xsk_buff_xdp_get_dma() argument
     75  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);  in xsk_buff_xdp_get_dma()
     80  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)  in xsk_buff_xdp_get_frame_dma() argument
     82  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);  in xsk_buff_xdp_get_frame_dma()
     98  static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)  in xsk_buff_alloc_batch() argument
    100  return xp_alloc_batch(pool, xdp, max);  in xsk_buff_alloc_batch()
    108  static inline void xsk_buff_free(struct xdp_buff *xdp)  in xsk_buff_free() argument
    110  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);  in xsk_buff_free()
    114  if (likely(!xdp_buff_has_frags(xdp)))  in xsk_buff_free()
    [all …]
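The xsk_buff_* wrappers above are the AF_XDP zero-copy buffer API that drivers build their RX refill paths on. A sketch of such a refill loop follows; my_ring and my_post_rx_desc() are hypothetical placeholders for driver-specific descriptor handling, not kernel symbols.

#include <net/xdp_sock_drv.h>

/* Placeholders for driver-specific ring structures and descriptor posting. */
struct my_ring;
void my_post_rx_desc(struct my_ring *ring, dma_addr_t dma, struct xdp_buff *xdp);

static u16 my_refill_rx_ring(struct my_ring *ring, struct xsk_buff_pool *pool,
                             u16 count)
{
        u16 filled = 0;

        while (filled < count) {
                struct xdp_buff *xdp = xsk_buff_alloc(pool); /* one UMEM frame */
                dma_addr_t dma;

                if (!xdp)                        /* fill queue empty, stop early */
                        break;

                dma = xsk_buff_xdp_get_dma(xdp); /* address for the HW descriptor */
                my_post_rx_desc(ring, dma, xdp); /* stash xdp so the completion
                                                  * path can xsk_buff_free() it */
                filled++;
        }

        return filled;
}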
|
/linux-6.6.21/Documentation/bpf/ |
D | redirect.rst |
     25  :doc: xdp redirect
     29  those that do, not all of them support non-linear frames. Non-linear xdp
     45  sudo bpftrace -e 'tracepoint:xdp:* { @cnt[probe] = count(); }'
     49  @cnt[tracepoint:xdp:mem_connect]: 18
     50  @cnt[tracepoint:xdp:mem_disconnect]: 18
     51  @cnt[tracepoint:xdp:xdp_exception]: 19605
     52  @cnt[tracepoint:xdp:xdp_devmap_xmit]: 1393604
     53  @cnt[tracepoint:xdp:xdp_redirect]: 22292200
     56  The various xdp tracepoints can be found in ``source/include/trace/events/xdp.h``
     64  'tracepoint:xdp:xdp_redirect*_err {@redir_errno[-args->err] = count();}
    [all …]
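The counters above are driven by programs returning XDP_REDIRECT. A minimal sketch of such a program, redirecting every frame through a one-entry devmap; the map name and fixed key 0 are arbitrary choices, not taken from redirect.rst.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);   /* target ifindex, filled in from user space */
} tx_port SEC(".maps");

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
        /* On a lookup miss the 0 flags value makes the helper return
         * XDP_ABORTED, which shows up under tracepoint:xdp:xdp_exception.
         */
        return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";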
|
/linux-6.6.21/tools/testing/selftests/bpf/progs/ |
D | xdp_features.c |
     65  xdp_process_echo_packet(struct xdp_md *xdp, bool dut)  in xdp_process_echo_packet() argument
     67  void *data_end = (void *)(long)xdp->data_end;  in xdp_process_echo_packet()
     68  void *data = (void *)(long)xdp->data;  in xdp_process_echo_packet()
    135  xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)  in xdp_update_stats() argument
    139  if (xdp_process_echo_packet(xdp, tx))  in xdp_update_stats()
    156  int xdp_tester_check_tx(struct xdp_md *xdp)  in xdp_tester_check_tx() argument
    158  xdp_update_stats(xdp, true, false);  in xdp_tester_check_tx()
    164  int xdp_tester_check_rx(struct xdp_md *xdp)  in xdp_tester_check_rx() argument
    166  xdp_update_stats(xdp, false, false);  in xdp_tester_check_rx()
    174  int xdp_do_pass(struct xdp_md *xdp)  in xdp_do_pass() argument
    [all …]
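The data/data_end casts in xdp_process_echo_packet() are the standard verifier bounds-check idiom. The same pattern in isolation, as a hedged sketch rather than a copy of the selftest:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int parse_eth_only(struct xdp_md *xdp)
{
        void *data_end = (void *)(long)xdp->data_end;
        void *data = (void *)(long)xdp->data;
        struct ethhdr *eth = data;

        /* Without this comparison the verifier rejects the eth->h_proto load. */
        if ((void *)(eth + 1) > data_end)
                return XDP_DROP;

        return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";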
|
D | test_xdp.c |
     78  static __always_inline int handle_ipv4(struct xdp_md *xdp)  in handle_ipv4() argument
     80  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
     81  void *data = (void *)(long)xdp->data;  in handle_ipv4()
    111  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))  in handle_ipv4()
    114  data = (void *)(long)xdp->data;  in handle_ipv4()
    115  data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
    151  static __always_inline int handle_ipv6(struct xdp_md *xdp)  in handle_ipv6() argument
    153  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv6()
    154  void *data = (void *)(long)xdp->data;  in handle_ipv6()
    181  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))  in handle_ipv6()
    [all …]
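Lines 114-115 reload data/data_end because bpf_xdp_adjust_head() may move the packet and invalidates every previously derived pointer. A condensed sketch of that reserve-and-reload pattern, illustrative only and not the iptunnel logic:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int grow_head(struct xdp_md *xdp)
{
        struct ethhdr *eth;
        void *data, *data_end;

        /* Negative delta grows headroom by sizeof(struct iphdr). */
        if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
                return XDP_DROP;

        /* Mandatory reload + re-check: old pointers must not be reused. */
        data = (void *)(long)xdp->data;
        data_end = (void *)(long)xdp->data_end;
        eth = data;
        if ((void *)(eth + 1) > data_end)
                return XDP_DROP;

        /* A real encap program would now write the outer headers here. */
        return XDP_TX;
}

char _license[] SEC("license") = "GPL";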
|
D | test_xdp_loop.c |
     74  static __always_inline int handle_ipv4(struct xdp_md *xdp)  in handle_ipv4() argument
     76  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
     77  void *data = (void *)(long)xdp->data;  in handle_ipv4()
    107  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))  in handle_ipv4()
    110  data = (void *)(long)xdp->data;  in handle_ipv4()
    111  data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
    147  static __always_inline int handle_ipv6(struct xdp_md *xdp)  in handle_ipv6() argument
    149  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv6()
    150  void *data = (void *)(long)xdp->data;  in handle_ipv6()
    177  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))  in handle_ipv6()
    [all …]
|
D | test_xdp_do_redirect.c |
     29  int xdp_redirect(struct xdp_md *xdp)  in xdp_redirect() argument
     31  __u32 *metadata = (void *)(long)xdp->data_meta;  in xdp_redirect()
     32  void *data_end = (void *)(long)xdp->data_end;  in xdp_redirect()
     33  void *data = (void *)(long)xdp->data;  in xdp_redirect()
     41  if (xdp->ingress_ifindex != ifindex_in)  in xdp_redirect()
     55  if (bpf_xdp_adjust_meta(xdp, sizeof(__u64)))  in xdp_redirect()
     86  int xdp_count_pkts(struct xdp_md *xdp)  in xdp_count_pkts() argument
     88  void *data = (void *)(long)xdp->data;  in xdp_count_pkts()
     89  void *data_end = (void *)(long)xdp->data_end;  in xdp_count_pkts()
|
D | xsk_xdp_progs.c |
     18  SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)  in xsk_def_prog() argument
     23  SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)  in xsk_xdp_drop() argument
     32  SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)  in xsk_xdp_populate_metadata() argument
     39  err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));  in xsk_xdp_populate_metadata()
     43  data = (void *)(long)xdp->data;  in xsk_xdp_populate_metadata()
     44  data_meta = (void *)(long)xdp->data_meta;  in xsk_xdp_populate_metadata()
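xsk_xdp_populate_metadata() shows the reserve-then-check metadata pattern: a negative bpf_xdp_adjust_meta() delta grows the area in front of the packet, and the new data_meta pointer must be bounds-checked against data before it is written. A sketch with an arbitrary struct my_meta layout:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct my_meta {
        __u32 mark;
        __u32 rx_queue;
};

SEC("xdp")
int add_meta(struct xdp_md *xdp)
{
        struct my_meta *meta;
        void *data;

        if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(*meta)))
                return XDP_PASS;        /* not enough headroom, skip */

        data = (void *)(long)xdp->data;
        meta = (void *)(long)xdp->data_meta;
        if ((void *)(meta + 1) > data)  /* verifier-required check */
                return XDP_PASS;

        meta->mark = 42;
        meta->rx_queue = xdp->rx_queue_index;
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";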
|
D | test_xdp_bpf2bpf.c |
     45  int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)  in BPF_PROG() argument
     49  meta.ifindex = xdp->rxq->dev->ifindex;  in BPF_PROG()
     50  meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);  in BPF_PROG()
     51  bpf_xdp_output(xdp, &perf_buf_map,  in BPF_PROG()
     56  test_result_fentry = xdp->rxq->dev->ifindex;  in BPF_PROG()
     62  int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)  in BPF_PROG() argument
|
D | test_xdp_adjust_tail_shrink.c |
     13  int _xdp_adjust_tail_shrink(struct xdp_md *xdp)  in _xdp_adjust_tail_shrink() argument
     15  __u8 *data_end = (void *)(long)xdp->data_end;  in _xdp_adjust_tail_shrink()
     16  __u8 *data = (void *)(long)xdp->data;  in _xdp_adjust_tail_shrink()
     19  switch (bpf_xdp_get_buff_len(xdp)) {  in _xdp_adjust_tail_shrink()
     47  if (bpf_xdp_adjust_tail(xdp, 0 - offset))  in _xdp_adjust_tail_shrink()
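A companion sketch to _xdp_adjust_tail_shrink(): a negative bpf_xdp_adjust_tail() delta trims the frame, and bpf_xdp_get_buff_len() reports the full length including any frags. The 256-byte cap is an arbitrary example value.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_LEN 256     /* arbitrary cap for the example */

SEC("xdp.frags")
int trim_to_cap(struct xdp_md *xdp)
{
        long len = bpf_xdp_get_buff_len(xdp);

        /* MAX_LEN - len is negative here, so the tail shrinks. */
        if (len > MAX_LEN && bpf_xdp_adjust_tail(xdp, MAX_LEN - len))
                return XDP_DROP;

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";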
|
/linux-6.6.21/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
     28  struct xdp_buff *xdp)  in bnxt_xmit_bd() argument
     38  if (xdp && xdp_buff_has_frags(xdp)) {  in bnxt_xmit_bd()
     39  sinfo = xdp_get_shared_info_from_buff(xdp);  in bnxt_xmit_bd()
     47  if (xdp)  in bnxt_xmit_bd()
     48  tx_buf->page = virt_to_head_page(xdp->data);  in bnxt_xmit_bd()
     97  struct xdp_buff *xdp)  in __bnxt_xmit_xdp() argument
    101  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);  in __bnxt_xmit_xdp()
    184  struct xdp_buff *xdp)  in bnxt_xdp_buff_init() argument
    199  xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);  in bnxt_xdp_buff_init()
    200  xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);  in bnxt_xdp_buff_init()
    [all …]
|
D | bnxt_xdp.h |
     18  struct xdp_buff *xdp);
     21  struct xdp_buff xdp, struct page *page, u8 **data_ptr,
     23  int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
     31  struct xdp_buff *xdp);
     33  struct xdp_buff *xdp);
     36  struct xdp_buff *xdp,
|
/linux-6.6.21/samples/bpf/ |
D | xdp_adjust_tail_kern.c |
     70  static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)  in send_icmp4_too_big() argument
     74  if (bpf_xdp_adjust_head(xdp, 0 - headroom))  in send_icmp4_too_big()
     76  void *data = (void *)(long)xdp->data;  in send_icmp4_too_big()
     77  void *data_end = (void *)(long)xdp->data_end;  in send_icmp4_too_big()
    120  static __always_inline int handle_ipv4(struct xdp_md *xdp)  in handle_ipv4() argument
    122  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
    123  void *data = (void *)(long)xdp->data;  in handle_ipv4()
    129  if (bpf_xdp_adjust_tail(xdp, 0 - offset))  in handle_ipv4()
    131  return send_icmp4_too_big(xdp);  in handle_ipv4()
    137  int _xdp_icmp(struct xdp_md *xdp)  in _xdp_icmp() argument
    [all …]
|
D | xdp_tx_iptunnel_kern.c |
     77  static __always_inline int handle_ipv4(struct xdp_md *xdp)  in handle_ipv4() argument
     79  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
     80  void *data = (void *)(long)xdp->data;  in handle_ipv4()
    112  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))  in handle_ipv4()
    115  data = (void *)(long)xdp->data;  in handle_ipv4()
    116  data_end = (void *)(long)xdp->data_end;  in handle_ipv4()
    152  static __always_inline int handle_ipv6(struct xdp_md *xdp)  in handle_ipv6() argument
    154  void *data_end = (void *)(long)xdp->data_end;  in handle_ipv6()
    155  void *data = (void *)(long)xdp->data;  in handle_ipv6()
    184  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))  in handle_ipv6()
    [all …]
|
/linux-6.6.21/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
     99  struct xdp_buff *xdp)  in ixgbe_run_xdp_zc() argument
    108  act = bpf_prog_run_xdp(xdp_prog, xdp);  in ixgbe_run_xdp_zc()
    111  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);  in ixgbe_run_xdp_zc()
    125  xdpf = xdp_convert_buff_to_frame(xdp);  in ixgbe_run_xdp_zc()
    168  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in ixgbe_alloc_rx_buffers_zc()
    169  if (!bi->xdp) {  in ixgbe_alloc_rx_buffers_zc()
    174  dma = xsk_buff_xdp_get_dma(bi->xdp);  in ixgbe_alloc_rx_buffers_zc()
    214  const struct xdp_buff *xdp)  in ixgbe_construct_skb_zc() argument
    216  unsigned int totalsize = xdp->data_end - xdp->data_meta;  in ixgbe_construct_skb_zc()
    217  unsigned int metasize = xdp->data - xdp->data_meta;  in ixgbe_construct_skb_zc()
    [all …]
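ixgbe_run_xdp_zc() and the ice, vmxnet3 and netvsc entries below all follow one shape: run the program, then switch on the verdict. A condensed, generic sketch of that shape, not the ixgbe code; my_xmit_xdp_frame() stands in for the driver's own XDP TX routine.

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <trace/events/xdp.h>

/* Placeholder for the driver's own XDP TX routine. */
int my_xmit_xdp_frame(struct net_device *dev, struct xdp_frame *xdpf);

static int my_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                      struct xdp_buff *xdp)
{
        struct xdp_frame *xdpf;
        u32 act;

        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
                return 0;                       /* let the stack build an skb */
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (!xdpf || my_xmit_xdp_frame(dev, xdpf))
                        goto out_failure;
                return 1;
        case XDP_REDIRECT:
                if (xdp_do_redirect(dev, xdp, prog))
                        goto out_failure;
                return 1;                       /* caller must xdp_do_flush() later */
        default:
                bpf_warn_invalid_xdp_action(dev, prog, act);
                fallthrough;
        case XDP_ABORTED:
out_failure:
                trace_xdp_exception(dev, prog, act);
                fallthrough;
        case XDP_DROP:
                return -1;                      /* caller recycles the buffer */
        }
}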
|
/linux-6.6.21/drivers/net/ethernet/microchip/lan966x/ |
D | lan966x_xdp.c |
      9  static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)  in lan966x_xdp_setup() argument
     18  NL_SET_ERR_MSG_MOD(xdp->extack,  in lan966x_xdp_setup()
     24  old_prog = xchg(&port->xdp_prog, xdp->prog);  in lan966x_xdp_setup()
     43  int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)  in lan966x_xdp() argument
     45  switch (xdp->command) {  in lan966x_xdp()
     47  return lan966x_xdp_setup(dev, xdp);  in lan966x_xdp()
     79  struct xdp_buff xdp;  in lan966x_xdp_run() local
     82  xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,  in lan966x_xdp_run()
     84  xdp_prepare_buff(&xdp, page_address(page),  in lan966x_xdp_run()
     87  act = bpf_prog_run_xdp(xdp_prog, &xdp);  in lan966x_xdp_run()
    [all …]
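lan966x_xdp()/lan966x_xdp_setup() boil down to the generic .ndo_bpf attach pattern: swap the program pointer in and release the old one. A sketch under that assumption; struct my_port is a placeholder for the driver's netdev_priv() data, not a real structure.

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/filter.h>

struct my_port {
        struct bpf_prog *xdp_prog;
};

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct my_port *port = netdev_priv(dev);
        struct bpf_prog *old_prog;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                /* xchg() publishes the new program to the RX path atomically. */
                old_prog = xchg(&port->xdp_prog, xdp->prog);
                if (old_prog)
                        bpf_prog_put(old_prog);
                return 0;
        default:
                NL_SET_ERR_MSG_MOD(xdp->extack, "unsupported netdev_bpf command");
                return -EINVAL;
        }
}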
|
/linux-6.6.21/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
    454  static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,  in ice_fill_rx_descs() argument
    461  buffs = xsk_buff_alloc_batch(pool, xdp, count);  in ice_fill_rx_descs()
    463  dma = xsk_buff_xdp_get_dma(*xdp);  in ice_fill_rx_descs()
    468  xdp++;  in ice_fill_rx_descs()
    491  struct xdp_buff **xdp;  in __ice_alloc_rx_bufs_zc() local
    494  xdp = ice_xdp_buf(rx_ring, ntu);  in __ice_alloc_rx_bufs_zc()
    497  nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,  in __ice_alloc_rx_bufs_zc()
    505  xdp = ice_xdp_buf(rx_ring, 0);  in __ice_alloc_rx_bufs_zc()
    511  nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);  in __ice_alloc_rx_bufs_zc()
    558  ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)  in ice_construct_skb_zc() argument
    [all …]
|
D | ice_txrx.c |
    385  struct xdp_buff *xdp = &rx_ring->xdp;  in ice_clean_rx_ring() local
    399  if (xdp->data) {  in ice_clean_rx_ring()
    400  xdp_return_buff(xdp);  in ice_clean_rx_ring()
    401  xdp->data = NULL;  in ice_clean_rx_ring()
    559  ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,  in ice_run_xdp() argument
    569  act = bpf_prog_run_xdp(xdp_prog, xdp);  in ice_run_xdp()
    576  ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);  in ice_run_xdp()
    583  if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))  in ice_run_xdp()
    598  ice_set_rx_bufs_act(xdp, rx_ring, ret);  in ice_run_xdp()
    609  struct xdp_buff xdp;  in ice_xmit_xdp_ring() local
    [all …]
|
/linux-6.6.21/drivers/net/vmxnet3/ |
D | vmxnet3_xdp.c |
    251  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,  in vmxnet3_run_xdp() argument
    260  act = bpf_prog_run_xdp(prog, xdp);  in vmxnet3_run_xdp()
    261  page = virt_to_page(xdp->data_hard_start);  in vmxnet3_run_xdp()
    267  err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);  in vmxnet3_run_xdp()
    276  xdpf = xdp_convert_buff_to_frame(xdp);  in vmxnet3_run_xdp()
    304  const struct xdp_buff *xdp)  in vmxnet3_build_skb() argument
    316  skb_reserve(skb, xdp->data - xdp->data_hard_start);  in vmxnet3_build_skb()
    317  skb_put(skb, xdp->data_end - xdp->data);  in vmxnet3_build_skb()
    331  struct xdp_buff xdp;  in vmxnet3_process_xdp_small() local
    341  xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);  in vmxnet3_process_xdp_small()
    [all …]
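The skb_reserve()/skb_put() pair in vmxnet3_build_skb() is the usual xdp_buff-to-skb conversion. A stand-alone sketch of the same conversion built on build_skb(); error handling and frame_sz bookkeeping are simplified, and my_build_skb() is not a kernel function.

#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *my_build_skb(struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;

        skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (!skb)
                return NULL;

        skb_reserve(skb, xdp->data - xdp->data_hard_start); /* headroom (+ meta) */
        skb_put(skb, xdp->data_end - xdp->data);            /* packet bytes */
        if (metasize)
                skb_metadata_set(skb, metasize);            /* expose XDP metadata */

        return skb;
}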
|
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_xsk.c |
     18  struct xdp_buff *xdp)  in nfp_net_xsk_rx_bufs_stash() argument
     27  rx_ring->xsk_rxbufs[idx].xdp = xdp;  in nfp_net_xsk_rx_bufs_stash()
     29  xsk_buff_xdp_get_frame_dma(xdp) + headroom;  in nfp_net_xsk_rx_bufs_stash()
     35  rxbuf->xdp = NULL;  in nfp_net_xsk_rx_unstash()
     40  if (rxbuf->xdp)  in nfp_net_xsk_rx_free()
     41  xsk_buff_free(rxbuf->xdp);  in nfp_net_xsk_rx_free()
     62  struct xdp_buff *xdp;  in nfp_net_xsk_rx_ring_fill_freelist() local
     67  xdp = xsk_buff_alloc(pool);  in nfp_net_xsk_rx_ring_fill_freelist()
     68  if (!xdp)  in nfp_net_xsk_rx_ring_fill_freelist()
     71  nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);  in nfp_net_xsk_rx_ring_fill_freelist()
|
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/nfd3/ |
D | xsk.c |
     35  txbuf->xdp = xrxbuf->xdp;  in nfp_nfd3_xsk_tx_xdp()
     87  skb_put_data(skb, xrxbuf->xdp->data, pkt_len);  in nfp_nfd3_xsk_rx_skb()
    105  xrxbuf->xdp->data - xrxbuf->xdp->data_meta);  in nfp_nfd3_xsk_rx_skb()
    184  xrxbuf->xdp->data += meta_len;  in nfp_nfd3_xsk_rx()
    185  xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;  in nfp_nfd3_xsk_rx()
    186  xdp_set_data_meta_invalid(xrxbuf->xdp);  in nfp_nfd3_xsk_rx()
    187  xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);  in nfp_nfd3_xsk_rx()
    188  net_prefetch(xrxbuf->xdp->data);  in nfp_nfd3_xsk_rx()
    192  xrxbuf->xdp->data -  in nfp_nfd3_xsk_rx()
    194  xrxbuf->xdp->data,  in nfp_nfd3_xsk_rx()
    [all …]
|
/linux-6.6.21/include/trace/events/ |
D | xdp.h |
      3  #define TRACE_SYSTEM xdp
     32  const struct bpf_prog *xdp, u32 act),
     34  TP_ARGS(dev, xdp, act),
     43  __entry->prog_id = xdp->aux->id;
     93  const struct bpf_prog *xdp,
     98  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
    124  __entry->prog_id = xdp->aux->id;
    143  const struct bpf_prog *xdp,
    147  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
    152  const struct bpf_prog *xdp,
    [all …]
|
/linux-6.6.21/drivers/net/hyperv/ |
D | netvsc_bpf.c |
     25  struct xdp_buff *xdp)  in netvsc_run_xdp() argument
     35  xdp->data_hard_start = NULL;  in netvsc_run_xdp()
     56  xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);  in netvsc_run_xdp()
     57  xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);  in netvsc_run_xdp()
     59  memcpy(xdp->data, data, len);  in netvsc_run_xdp()
     61  act = bpf_prog_run_xdp(prog, xdp);  in netvsc_run_xdp()
     73  if (!xdp_do_redirect(ndev, xdp, prog)) {  in netvsc_run_xdp()
    107  xdp->data_hard_start = NULL;  in netvsc_run_xdp()
    167  struct netdev_bpf xdp;  in netvsc_vf_setxdp() local
    178  memset(&xdp, 0, sizeof(xdp));  in netvsc_vf_setxdp()
    [all …]
|
/linux-6.6.21/tools/testing/selftests/bpf/ |
D | test_xdp_veth.sh |
     64  if ! ip link set dev lo xdp off > /dev/null 2>&1; then
    104  xdp_redirect_map.bpf.o $BPF_DIR/progs type xdp \
    109  ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0
    110  ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1
    111  ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2
    113  ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.bpf.o sec xdp
    114  ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.bpf.o sec xdp
    115  ip -n ${NS3} link set dev veth33 xdp obj xdp_dummy.bpf.o sec xdp
|
/linux-6.6.21/net/bpf/ |
D | test_run.c |
    134  struct xdp_test_data *xdp = arg;  in xdp_test_run_init_page() local
    139  orig_ctx = xdp->orig_ctx;  in xdp_test_run_init_page()
    149  xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);  in xdp_test_run_init_page()
    159  static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)  in xdp_test_run_setup() argument
    166  .pool_size = xdp->batch_size,  in xdp_test_run_setup()
    169  .init_arg = xdp,  in xdp_test_run_setup()
    172  xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);  in xdp_test_run_setup()
    173  if (!xdp->frames)  in xdp_test_run_setup()
    176  xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);  in xdp_test_run_setup()
    177  if (!xdp->skbs)  in xdp_test_run_setup()
    [all …]
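The setup code above backs BPF_PROG_TEST_RUN for XDP programs. From user space the same machinery is driven through libbpf's bpf_prog_test_run_opts(); a sketch assuming prog_fd comes from an already-loaded XDP program.

#include <stdio.h>
#include <bpf/bpf.h>

static int run_xdp_once(int prog_fd)
{
        unsigned char pkt[64] = { 0 };   /* dummy Ethernet frame, all zeroes */
        unsigned char out[128];
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in = pkt,
                .data_size_in = sizeof(pkt),
                .data_out = out,
                .data_size_out = sizeof(out),
                .repeat = 1,
        );
        int err;

        err = bpf_prog_test_run_opts(prog_fd, &opts);
        if (err)
                return err;

        /* opts.retval carries the XDP verdict (XDP_PASS, XDP_DROP, ...). */
        printf("verdict=%u out_len=%u\n", opts.retval, opts.data_size_out);
        return 0;
}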
|