// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define ATTR __always_inline
#include "test_jhash.h"

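/* Two identical ARRAY maps; randmap() below updates one of them at random. */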
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array1 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array2 SEC(".maps");

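/* Pick array1 or array2 at random and write a pseudo-random value derived
 * from v and dev->mtu into a random slot.
 */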
static __noinline int randmap(int v, const struct net_device *dev)
{
	struct bpf_map *map = (struct bpf_map *)&array1;
	int key = bpf_get_prandom_u32() & 0xff;
	int *val;

	if (bpf_get_prandom_u32() & 1)
		map = (struct bpf_map *)&array2;

	val = bpf_map_lookup_elem(map, &key);
	if (val)
		*val = bpf_get_prandom_u32() + v + dev->mtu;

	return 0;
}

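/* The three programs below exercise randmap() from different attach points:
 * a BTF-typed tracepoint, and an fentry and an fexit on eth_type_trans().
 */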
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
	     *from_dev, const struct net_device *to_dev, int sent, int drops,
	     int err)
{
	return randmap(from_dev->ifindex, from_dev);
}

SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

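/* Read-only flag guarding the CO-RE accesses below, which the in-line
 * comments describe as dead code.
 */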
volatile const int never;

struct __sk_bUfF /* it will not exist in vmlinux */ {
	int len;
} __attribute__((preserve_access_index));

struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
	size_t len;
} __attribute__((preserve_access_index));

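/* Walk the packet in nh_off-byte windows, hashing each one into tc_index;
 * the bounds check against data_end is required by the verifier for direct
 * packet access.
 */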
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	void *ptr;
	int nh_off, i = 0;

	nh_off = 14;

	/* pragma unroll doesn't work on large loops */
#define C do { \
	ptr = data + i; \
	if (ptr + nh_off > data_end) \
		break; \
	ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
	if (never) { \
		/* below is dead code with an unresolvable CO-RE relo */ \
		i += ((struct __sk_bUfF *)ctx)->len; \
		/* this CO-RE relo may or may not resolve
		 * depending on whether bpf_testmod is loaded.
		 */ \
		i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
	} \
	} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
	C30;C30;C30; /* 90 calls */
	return 0;
}

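/* Function-pointer typedefs probed against kernel BTF below; the ___suffix
 * part of a type name is a CO-RE "flavor" that libbpf ignores when matching.
 */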
typedef int (*func_proto_typedef___match)(long);
typedef int (*func_proto_typedef___doesnt_match)(char *);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef___match);

int proto_out[3];

SEC("raw_tracepoint/sys_enter")
int core_relo_proto(void *ctx)
{
	proto_out[0] = bpf_core_type_exists(func_proto_typedef___match);
	proto_out[1] = bpf_core_type_exists(func_proto_typedef___doesnt_match);
	proto_out[2] = bpf_core_type_exists(func_proto_typedef_nested1);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";