/linux-6.6.21/samples/bpf/

parse_varlen.c
    22   static int tcp(void *data, uint64_t tp_off, void *data_end)   in tcp() argument
    26   if (tcp + 1 > data_end)   in tcp()
    33   static int udp(void *data, uint64_t tp_off, void *data_end)   in udp() argument
    37   if (udp + 1 > data_end)   in udp()
    51   static int parse_ipv4(void *data, uint64_t nh_off, void *data_end)   in parse_ipv4() argument
    57   if (iph + 1 > data_end)   in parse_ipv4()
    66   if (iph + 1 > data_end)   in parse_ipv4()
    72   return tcp(data, nh_off + ihl_len, data_end);   in parse_ipv4()
    74   return udp(data, nh_off + ihl_len, data_end);   in parse_ipv4()
    78   static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)   in parse_ipv6() argument
    [all …]

xdp_tx_iptunnel_kern.c
    45   static __always_inline int get_dport(void *trans_data, void *data_end,   in get_dport() argument
    54   if (th + 1 > data_end)   in get_dport()
    59   if (uh + 1 > data_end)   in get_dport()
    79   void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4() local
    92   if (iph + 1 > data_end)   in handle_ipv4()
    95   dport = get_dport(iph + 1, data_end, iph->protocol);   in handle_ipv4()
    116  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    122  if (new_eth + 1 > data_end ||   in handle_ipv4()
    123  old_eth + 1 > data_end ||   in handle_ipv4()
    124  iph + 1 > data_end)   in handle_ipv4()
    [all …]

tc_l2_redirect_kern.c
    64   void *data_end = (void *)(long)skb->data_end;   in _l2_to_iptun_ingress_forward() local
    69   if (data + sizeof(*eth) > data_end)   in _l2_to_iptun_ingress_forward()
    80   if (data + sizeof(*eth) + sizeof(*iph) > data_end)   in _l2_to_iptun_ingress_forward()
    93   if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)   in _l2_to_iptun_ingress_forward()
    115  void *data_end = (void *)(long)skb->data_end;   in _l2_to_iptun_ingress_redirect() local
    120  if (data + sizeof(*eth) > data_end)   in _l2_to_iptun_ingress_redirect()
    132  if (data + sizeof(*eth) + sizeof(*iph) > data_end)   in _l2_to_iptun_ingress_redirect()
    156  void *data_end = (void *)(long)skb->data_end;   in _l2_to_ip6tun_ingress_redirect() local
    159  if (data + sizeof(*eth) > data_end)   in _l2_to_ip6tun_ingress_redirect()
    170  if (data + sizeof(*eth) + sizeof(*iph) > data_end)   in _l2_to_ip6tun_ingress_redirect()
    [all …]
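Every file in this listing leans on the same direct-packet-access idiom: cast the context's data and data_end fields to real pointers, then compare the header pointer against data_end before touching the header, so the verifier can prove each access stays inside the packet. A minimal sketch of that pattern, assuming a hypothetical XDP program (xdp_bounds_check_example and the drop-TCP action are illustrative, not taken from any file above):

/* Minimal sketch of the data_end bounds-check idiom; hypothetical program,
 * not copied from any of the files listed above. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_bounds_check_example(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* The verifier rejects eth-> loads unless this check comes first. */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)	/* one check per header layer */
		return XDP_PASS;

	/* Arbitrary example action: drop TCP, pass everything else. */
	return iph->protocol == IPPROTO_TCP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";

The tc entries further down do the same with struct __sk_buff, whose data and data_end members are 32-bit values that likewise have to be cast through long before they can be compared.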
/linux-6.6.21/tools/testing/selftests/bpf/progs/

verifier_direct_packet_access.c
    20   __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in __retval()
    40   __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in direct_packet_access_test1()
    74   __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),   in direct_packet_access_test2()
    110  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in direct_packet_access_test4_write()
    132  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in pkt_end_reg_good_access()
    154  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in pkt_end_reg_bad_access()
    177  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in pkt_end_reg_both_accesses()
    201  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in test8_double_test_variant_1()
    225  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in test9_double_test_variant_2()
    247  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in packet_access_test10_write_invalid()
    [all …]

test_xdp.c
    46   static __always_inline int get_dport(void *trans_data, void *data_end,   in get_dport() argument
    55   if (th + 1 > data_end)   in get_dport()
    60   if (uh + 1 > data_end)   in get_dport()
    80   void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4() local
    93   if (iph + 1 > data_end)   in handle_ipv4()
    96   dport = get_dport(iph + 1, data_end, iph->protocol);   in handle_ipv4()
    115  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    121  if (new_eth + 1 > data_end ||   in handle_ipv4()
    122  old_eth + 1 > data_end ||   in handle_ipv4()
    123  iph + 1 > data_end)   in handle_ipv4()
    [all …]
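handle_ipv4() in xdp_tx_iptunnel_kern.c and test_xdp.c reads xdp->data_end a second time partway through the function (lines 116 and 115 above). That is the typical shape of an encapsulation path: once a helper such as bpf_xdp_adjust_head() moves the start of the frame, every previously derived packet pointer is invalid, so the program must reload data/data_end and redo its bounds checks. A hedged sketch of just that step, with the actual header rewriting left out (push_outer_iphdr is a made-up name):

/* Sketch: reload and re-check packet pointers after growing headroom.
 * Hypothetical fragment; assumes the caller wants room for one iphdr. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

static __always_inline int push_outer_iphdr(struct xdp_md *xdp)
{
	void *data, *data_end;
	struct ethhdr *new_eth;

	/* A negative delta moves xdp->data backwards, creating headroom. */
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
		return XDP_DROP;

	/* Every pointer derived before the call is now stale. */
	data     = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;

	new_eth = data;
	if ((void *)(new_eth + 1) > data_end)
		return XDP_DROP;

	/* ...rebuild the Ethernet header and fill in the outer iphdr... */
	return XDP_TX;
}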
test_xdp_loop.c
    42   static __always_inline int get_dport(void *trans_data, void *data_end,   in get_dport() argument
    51   if (th + 1 > data_end)   in get_dport()
    56   if (uh + 1 > data_end)   in get_dport()
    76   void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4() local
    89   if (iph + 1 > data_end)   in handle_ipv4()
    92   dport = get_dport(iph + 1, data_end, iph->protocol);   in handle_ipv4()
    111  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    117  if (new_eth + 1 > data_end ||   in handle_ipv4()
    118  old_eth + 1 > data_end ||   in handle_ipv4()
    119  iph + 1 > data_end)   in handle_ipv4()
    [all …]

xdpwall.c
    106  static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)   in parse_ethhdr() argument
    110  if (eth + 1 > data_end)   in parse_ethhdr()
    165  get_transport_hdr(__u16 offset, void *data, void *data_end)   in get_transport_hdr() argument
    167  if (offset > 255 || data + offset > data_end)   in get_transport_hdr()
    191  parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)   in parse_tcp() argument
    195  if (tcp + 1 > data_end)   in parse_tcp()
    206  parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)   in parse_udp() argument
    210  if (udp + 1 > data_end)   in parse_udp()
    234  filter_transport_hdr(void *transport_hdr, void *data_end,   in filter_transport_hdr() argument
    238  if (!parse_tcp(info, transport_hdr, data_end))   in filter_transport_hdr()
    [all …]
test_xdp_noinline.c
    232  bool parse_udp(void *data, void *data_end,   in parse_udp() argument
    241  if (udp + 1 > data_end)   in parse_udp()
    254  bool parse_tcp(void *data, void *data_end,   in parse_tcp() argument
    263  if (tcp + 1 > data_end)   in parse_tcp()
    286  void *data_end;   in encap_v6() local
    292  data_end = (void *)(long)xdp->data_end;   in encap_v6()
    296  if (new_eth + 1 > data_end ||   in encap_v6()
    297  old_eth + 1 > data_end || ip6h + 1 > data_end)   in encap_v6()
    332  void *data_end;   in encap_v4() local
    340  data_end = (void *)(long)xdp->data_end;   in encap_v4()
    [all …]

test_xdp_do_redirect.c
    32   void *data_end = (void *)(long)xdp->data_end;   in xdp_redirect() local
    38   if (payload + 1 > data_end)   in xdp_redirect()
    67   static bool check_pkt(void *data, void *data_end, const __u32 mark)   in check_pkt() argument
    72   if (payload + 1 > data_end)   in check_pkt()
    89   void *data_end = (void *)(long)xdp->data_end;   in xdp_count_pkts() local
    91   if (check_pkt(data, data_end, MARK_XMIT))   in xdp_count_pkts()
    105  void *data_end = (void *)(long)skb->data_end;   in tc_count_pkts() local
    107  if (check_pkt(data, data_end, MARK_SKB))   in tc_count_pkts()
test_btf_skc_cls_ingress.c
    33   void *data_end;   in test_syncookie_helper() local
    35   data_end = (void *)(long)(skb->data_end);   in test_syncookie_helper()
    42   if ((void *)th + 40 > data_end) {   in test_syncookie_helper()
    75   void *data_end;   in handle_ip6_tcp() local
    77   data_end = (void *)(long)(skb->data_end);   in handle_ip6_tcp()
    80   if (th + 1 > data_end)   in handle_ip6_tcp()
    89   if ((void *)tuple + tuple_len > data_end) {   in handle_ip6_tcp()
    153  void *data_end;   in cls_ingress() local
    155  data_end = (void *)(long)(skb->data_end);   in cls_ingress()
    158  if (eth + 1 > data_end)   in cls_ingress()
    [all …]

vrf_socket_lookup.c
    18   static void socket_lookup(void *ctx, void *data_end, void *data)   in socket_lookup() argument
    26   if (eth + 1 > data_end)   in socket_lookup()
    33   if (iph + 1 > data_end)   in socket_lookup()
    38   if ((void *)tp + tplen > data_end)   in socket_lookup()
    66   void *data_end = (void *)(long)skb->data_end;   in tc_socket_lookup() local
    72   socket_lookup(skb, data_end, data);   in tc_socket_lookup()
    79   void *data_end = (void *)(long)xdp->data_end;   in xdp_socket_lookup() local
    85   socket_lookup(xdp, data_end, data);   in xdp_socket_lookup()
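The "(void *)tp + tplen > data_end" test on line 38 of vrf_socket_lookup.c is the usual guard in front of a socket lookup: a struct bpf_sock_tuple is overlaid on the packet and the whole tuple must be shown to be in bounds before it is passed to the helper. A sketch of that pattern, assuming bpf_sk_lookup_tcp() and an option-less IPv4 header (lookup_v4 is a hypothetical helper, not the file's code):

/* Sketch: overlay a bpf_sock_tuple on the packet's addresses/ports and
 * bounds-check it before the lookup; hypothetical tc fragment. The caller
 * is assumed to have already verified that iph + 1 <= data_end. */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

static __always_inline void lookup_v4(struct __sk_buff *skb,
				      struct iphdr *iph, void *data_end)
{
	struct bpf_sock_tuple *tuple;
	struct bpf_sock *sk;

	/* Assumes no IP options, so the ports follow daddr immediately. */
	if (iph->ihl != 5 || iph->protocol != IPPROTO_TCP)
		return;

	/* saddr/daddr/sport/dport lie contiguously starting at iph->saddr. */
	tuple = (struct bpf_sock_tuple *)&iph->saddr;
	if ((void *)tuple + sizeof(tuple->ipv4) > data_end)
		return;

	sk = bpf_sk_lookup_tcp(skb, tuple, sizeof(tuple->ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);
}

The helper only accepts the call when the verifier can see that tuple_size bytes starting at the tuple pointer are inside the packet, which is exactly what the data_end comparison establishes.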
verifier_helper_packet_access.c
    36   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in test1_valid_packet_ptr_range()
    84   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in to_packet_test3_variable_add()
    109  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in packet_ptr_with_bad_range_1()
    133  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in ptr_with_too_short_range_1()
    158  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in cls_valid_packet_ptr_range()
    206  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in packet_test8_cls_variable_add()
    231  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in packet_ptr_with_bad_range_2()
    255  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in ptr_with_too_short_range_2()
    280  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in test11_cls_unsuitable_helper_1()
    303  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))   in test12_cls_unsuitable_helper_2()
    [all …]

test_tc_neigh_fib.c
    27   void *data_end = ctx_ptr(skb->data_end);   in fill_fib_params_v4() local
    31   if (data + sizeof(struct ethhdr) > data_end)   in fill_fib_params_v4()
    35   if ((void *)(ip4h + 1) > data_end)   in fill_fib_params_v4()
    55   void *data_end = ctx_ptr(skb->data_end);   in fill_fib_params_v6() local
    59   if (data + sizeof(struct ethhdr) > data_end)   in fill_fib_params_v6()
    63   if ((void *)(ip6h + 1) > data_end)   in fill_fib_params_v6()
    81   void *data_end = ctx_ptr(skb->data_end);   in tc_chk() local
    85   if (data + sizeof(struct ethhdr) > data_end)   in tc_chk()
    128  void *data_end = ctx_ptr(skb->data_end);   in tc_redir() local
    131  if (eth + 1 > data_end)   in tc_redir()
test_xdp_vlan.c
    57   bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)   in parse_eth_frame() argument
    64   if ((void *)eth + offset + (2*sizeof(struct _vlan_hdr)) > data_end)   in parse_eth_frame()
    108  void *data_end = (void *)(long)ctx->data_end;   in xdp_prognum0() local
    112  if (!parse_eth_frame(data, data_end, &pkt))   in xdp_prognum0()
    150  void *data_end = (void *)(long)ctx->data_end;   in xdp_prognum1() local
    154  if (!parse_eth_frame(data, data_end, &pkt))   in xdp_prognum1()
    184  void *data_end = (void *)(long)ctx->data_end;   in xdp_prognum2() local
    189  if (!parse_eth_frame(data, data_end, &pkt))   in xdp_prognum2()
    230  void *data_end = (void *)(long)ctx->data_end;   in xdp_prognum3() local
    235  if (!parse_eth_frame(orig_eth, data_end, &pkt))   in xdp_prognum3()

test_tc_neigh.c
    42   void *data_end = ctx_ptr(skb->data_end);   in is_remote_ep_v4() local
    46   if (data + sizeof(struct ethhdr) > data_end)   in is_remote_ep_v4()
    50   if ((void *)(ip4h + 1) > data_end)   in is_remote_ep_v4()
    59   void *data_end = ctx_ptr(skb->data_end);   in is_remote_ep_v6() local
    63   if (data + sizeof(struct ethhdr) > data_end)   in is_remote_ep_v6()
    67   if ((void *)(ip6h + 1) > data_end)   in is_remote_ep_v6()
    76   void *data_end = ctx_ptr(skb->data_end);   in tc_chk() local
    80   if (data + sizeof(struct ethhdr) > data_end)   in tc_chk()
test_tcp_check_syncookie_kern.c
    26   static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,   in gen_syncookie() argument
    37   if ((void *)tcph + thlen > data_end)   in gen_syncookie()
    46   void *data_end)   in check_syncookie() argument
    61   if (ethh + 1 > data_end)   in check_syncookie()
    67   if (ipv4h + 1 > data_end)   in check_syncookie()
    74   if (tcph + 1 > data_end)   in check_syncookie()
    90   seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),   in check_syncookie()
    99   if (ipv6h + 1 > data_end)   in check_syncookie()
    106  if (tcph + 1 > data_end)   in check_syncookie()
    122  seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),   in check_syncookie()
    [all …]
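gen_syncookie() above guards "(void *)tcph + thlen > data_end" before using a TCP header whose length comes from the packet itself. That extra check matters because tcph->doff is attacker controlled; presumably the length is then handed to bpf_tcp_gen_syncookie(), which takes the header length as an argument. A hedged sketch of that shape (gen_cookie is a made-up wrapper, and the helper's use here is an assumption, not copied from the file):

/* Sketch: bound a variable-length TCP header before generating a SYN
 * cookie for it. The caller is assumed to have already checked that
 * tcph + 1 <= data_end, so reading tcph->doff is safe. */
#include <linux/bpf.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>

static __always_inline __s64 gen_cookie(void *data_end, struct bpf_sock *sk,
					 void *iph, __u32 ip_len,
					 struct tcphdr *tcph)
{
	__u32 thlen = tcph->doff * 4;	/* header length taken from the packet */

	if (thlen < sizeof(*tcph))
		return 0;
	/* doff is attacker controlled: prove the full header is in bounds. */
	if ((void *)tcph + thlen > data_end)
		return 0;

	return bpf_tcp_gen_syncookie(sk, iph, ip_len, tcph, thlen);
}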
verifier_xdp_direct_packet_access.c
    25   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in end_mangling_bad_access_1()
    46   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in end_mangling_bad_access_2()
    66   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    87   __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    108  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    128  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    149  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    170  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    192  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    213  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))   in __flag()
    [all …]
test_pkt_access.c
    88   void *data_end = (void *)(long)skb->data_end;   in test_pkt_write_access_subprog() local
    95   if (tcp + 1 > data_end)   in test_pkt_write_access_subprog()
    105  void *data_end = (void *)(long)skb->data_end;   in test_pkt_access() local
    112  if (eth + 1 > data_end)   in test_pkt_access()
    118  if (iph + 1 > data_end)   in test_pkt_access()
    126  if (ip6h + 1 > data_end)   in test_pkt_access()
    142  if (((void *)(tcp) + 20) > data_end || proto != 6)   in test_pkt_access()
    145  if (((void *)(tcp) + 18) > data_end)   in test_pkt_access()

test_xdp_meta.c
    14   __u8 *data, *data_meta, *data_end;   in ing_cls() local
    18   data_end = ctx_ptr(ctx, data_end);   in ing_cls()
    21   if (data + ETH_ALEN > data_end ||   in ing_cls()
    34   __u8 *data, *data_meta, *data_end;   in ing_xdp() local
    42   data_end = ctx_ptr(ctx, data_end);   in ing_xdp()
    45   if (data + ETH_ALEN > data_end ||   in ing_xdp()
sockmap_parse_prog.c
    8    void *data_end = (void *)(long) skb->data_end;   in bpf_prog1() local
    13   if (data + 10 > data_end) {   in bpf_prog1()
    18   data_end = (void *)(long)skb->data_end;   in bpf_prog1()
    20   if (data + 10 > data_end)   in bpf_prog1()
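sockmap_parse_prog.c reads skb->data_end twice: once up front and again at its line 18, after the first "data + 10 > data_end" test. The usual reason is a bpf_skb_pull_data() call in between, which linearizes the needed bytes but may reallocate packet memory, so the old pointers must be thrown away and the check repeated. A sketch of that shape, assuming a hypothetical stream parser (the section name, NEEDED_BYTES, and parse_example are illustrative):

/* Sketch: if the first N bytes are not directly accessible, pull them in
 * with bpf_skb_pull_data() and then re-derive and re-check the pointers.
 * Hypothetical sk_skb parser; the constants are illustrative only. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define NEEDED_BYTES 10

SEC("sk_skb/stream_parser")
int parse_example(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	if (data + NEEDED_BYTES > data_end) {
		/* Linearize the first NEEDED_BYTES bytes of the skb. */
		if (bpf_skb_pull_data(skb, NEEDED_BYTES))
			return SK_DROP;

		/* The pull may reallocate; old pointers are no longer valid. */
		data     = (void *)(long)skb->data;
		data_end = (void *)(long)skb->data_end;
		if (data + NEEDED_BYTES > data_end)
			return SK_DROP;
	}

	return skb->len;	/* a stream parser returns the parsed length */
}

char _license[] SEC("license") = "GPL";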
test_parse_tcp_hdr_opt.c
    39   const void *data_end = (void *)(long)xdp->data_end;   in parse_hdr_opt() local
    43   if (tcp_opt + 1 > data_end)   in parse_hdr_opt()
    58   tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end)   in parse_hdr_opt()
    69   if (tcp_opt + tcp_hdr_opt_len_tpr > data_end)   in parse_hdr_opt()
    85   const void *data_end = (void *)(long)xdp->data_end;   in xdp_ingress_v6() local
    94   if (tcp_hdr + 1 > data_end)   in xdp_ingress_v6()
test_tc_edt.c
    67   void *data_end = (void *)(long)skb->data_end;   in handle_tcp() local
    70   if ((void *)(tcp + 1) > data_end)   in handle_tcp()
    81   void *data_end = (void *)(long)skb->data_end;   in handle_ipv4() local
    87   if (data + sizeof(struct ethhdr) > data_end)   in handle_ipv4()
    90   if ((void *)(iph + 1) > data_end)   in handle_ipv4()
    93   if (((void *)iph) + ihl > data_end)   in handle_ipv4()
test_check_mtu.c
    61   void *data_end = (void *)(long)ctx->data_end;   in xdp_exceed_mtu() local
    64   __u32 data_len = data_end - data;   in xdp_exceed_mtu()
    88   void *data_end = (void *)(long)ctx->data_end;   in xdp_minus_delta() local
    91   __u32 data_len = data_end - data;   in xdp_minus_delta()
    112  void *data_end = (void *)(long)ctx->data_end;   in xdp_input_len() local
    115  __u32 data_len = data_end - data;   in xdp_input_len()
    203  void *data_end = (void *)(long)ctx->data_end;   in tc_exceed_mtu_da() local
    206  __u32 data_len = data_end - data;   in tc_exceed_mtu_da()
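test_check_mtu.c shows the other common use of data_end: subtracting the two pointers gives the length of the directly accessible packet data, which can then be compared against a limit. A minimal sketch of that calculation (xdp_len_check and USER_MTU are made-up names):

/* Sketch: derive the linear packet length as data_end - data and compare
 * it against an arbitrary limit; hypothetical program. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

#define USER_MTU 1500

SEC("xdp")
int xdp_len_check(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	__u32 data_len = data_end - data;	/* bytes visible to direct access */

	/* Frame length minus the Ethernet header, checked against the MTU. */
	if (data_len > USER_MTU + ETH_HLEN)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";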
test_assign_reuse.c
    39   if (ctx->data + sizeof(headers.tcp) > ctx->data_end)   in reuse_accept()
    45   if (ctx->data + sizeof(headers.udp) > ctx->data_end)   in reuse_accept()
    83   if (th + 1 > (void *)(long)(skb->data_end))   in maybe_assign_tcp()
    96   if (uh + 1 > (void *)(long)(skb->data_end))   in maybe_assign_udp()
    109  void *data_end = (void *)(long)skb->data_end;   in tc_main() local
    114  if (eth + 1 > data_end)   in tc_main()
    120  if (iph + 1 > data_end)   in tc_main()
    132  if (ip6h + 1 > data_end)   in tc_main()
/linux-6.6.21/tools/testing/selftests/net/

nat6to4.c
    53   const void *data_end = (void *)(long)skb->data_end;   in sched_cls_ingress6_nat_6_prog() local
    66   if (data + l2_header_size + sizeof(*ip6) > data_end)   in sched_cls_ingress6_nat_6_prog()
    135  data_end = (void *)(long)skb->data_end;   in sched_cls_ingress6_nat_6_prog()
    136  if (data + l2_header_size + sizeof(struct iphdr) > data_end)   in sched_cls_ingress6_nat_6_prog()
    154  const void *data_end = (void *)(long)skb->data_end;   in sched_cls_egress4_snat4_prog() local
    163  if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end)   in sched_cls_egress4_snat4_prog()
    210  if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end)   in sched_cls_egress4_snat4_prog()
    269  data_end = (void *)(long)skb->data_end;   in sched_cls_egress4_snat4_prog()
    273  if (data + l2_header_size + sizeof(ip6) > data_end)   in sched_cls_egress4_snat4_prog()