1 /* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7 #include <linux/skbuff.h>
8 #include <linux/netdevice.h>
9 #include <linux/version.h>
10 #include <uapi/linux/bpf.h>
11 #include <bpf/bpf_helpers.h>
12 #include <bpf/bpf_tracing.h>
13 #include <bpf/bpf_core_read.h>
14 #include "trace_common.h"
15
16 #define MAX_ENTRIES 1000
17 #define MAX_NR_CPUS 1024
18
/* Preallocated hash map (u32 key -> long value); hammered by stress_hmap()
 * and read back by stress_hash_map_lookup().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");
25
/* Common (shared across CPUs) LRU hash map; updated by stress_lru_hmap_alloc()
 * test case 0.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");
32
/* LRU hash map created with BPF_F_NO_COMMON_LRU (per-CPU LRU lists instead of
 * a shared one); updated by stress_lru_hmap_alloc() test case 1.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");
40
/* LRU hash pinned to NUMA node 0 via BPF_F_NUMA_NODE.  The named struct tag
 * (inner_lru) also serves as the inner-map template for array_of_lru_hashs
 * below; this instance statically fills its slot 0.
 */
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");
49
/* Array-of-maps holding one inner_lru map per CPU; stress_lru_hmap_alloc()
 * test case 2 indexes it with the current CPU id.  Slots beyond 0 are
 * presumably populated from user space — TODO confirm against the test driver.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};
59
/* Preallocated per-CPU hash map; hammered by stress_percpu_hmap(). */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");
66
/* Hash map with BPF_F_NO_PREALLOC (elements allocated on demand), so
 * stress_hmap_alloc() exercises the runtime alloc/free path.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");
74
/* Per-CPU hash map with BPF_F_NO_PREALLOC; exercised by
 * stress_percpu_hmap_alloc().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");
82
/* LPM trie keyed by an 8-byte key (u32 prefixlen + 4 address bytes, see
 * stress_lpm_trie_map_alloc()).  BPF_F_NO_PREALLOC is mandatory for tries.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");
90
/* Plain array map used as the lookup-speed baseline by
 * stress_array_map_lookup().
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");
97
/* LRU hash used for the lookup-only walk in stress_lru_hmap_alloc()
 * test case 3.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");
104
SYSCALL(sys_getuid)105 SEC("kprobe/" SYSCALL(sys_getuid))
106 int stress_hmap(struct pt_regs *ctx)
107 {
108 u32 key = bpf_get_current_pid_tgid();
109 long init_val = 1;
110 long *value;
111 int i;
112
113 for (i = 0; i < 10; i++) {
114 bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
115 value = bpf_map_lookup_elem(&hash_map, &key);
116 if (value)
117 bpf_map_delete_elem(&hash_map, &key);
118 }
119
120 return 0;
121 }
122
SYSCALL(sys_geteuid)123 SEC("kprobe/" SYSCALL(sys_geteuid))
124 int stress_percpu_hmap(struct pt_regs *ctx)
125 {
126 u32 key = bpf_get_current_pid_tgid();
127 long init_val = 1;
128 long *value;
129 int i;
130
131 for (i = 0; i < 10; i++) {
132 bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
133 value = bpf_map_lookup_elem(&percpu_hash_map, &key);
134 if (value)
135 bpf_map_delete_elem(&percpu_hash_map, &key);
136 }
137 return 0;
138 }
139
SYSCALL(sys_getgid)140 SEC("kprobe/" SYSCALL(sys_getgid))
141 int stress_hmap_alloc(struct pt_regs *ctx)
142 {
143 u32 key = bpf_get_current_pid_tgid();
144 long init_val = 1;
145 long *value;
146 int i;
147
148 for (i = 0; i < 10; i++) {
149 bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
150 value = bpf_map_lookup_elem(&hash_map_alloc, &key);
151 if (value)
152 bpf_map_delete_elem(&hash_map_alloc, &key);
153 }
154 return 0;
155 }
156
SYSCALL(sys_getegid)157 SEC("kprobe/" SYSCALL(sys_getegid))
158 int stress_percpu_hmap_alloc(struct pt_regs *ctx)
159 {
160 u32 key = bpf_get_current_pid_tgid();
161 long init_val = 1;
162 long *value;
163 int i;
164
165 for (i = 0; i < 10; i++) {
166 bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
167 value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
168 if (value)
169 bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
170 }
171 return 0;
172 }
173
SYSCALL(sys_connect)174 SEC("kprobe/" SYSCALL(sys_connect))
175 int stress_lru_hmap_alloc(struct pt_regs *ctx)
176 {
177 struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
178 char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn";
179 union {
180 u16 dst6[8];
181 struct {
182 u16 magic0;
183 u16 magic1;
184 u16 tcase;
185 u16 unused16;
186 u32 unused32;
187 u32 key;
188 };
189 } test_params;
190 struct sockaddr_in6 *in6;
191 u16 test_case;
192 int addrlen, ret;
193 long val = 1;
194 u32 key = 0;
195
196 in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
197 addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
198
199 if (addrlen != sizeof(*in6))
200 return 0;
201
202 ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
203 &in6->sin6_addr);
204 if (ret)
205 goto done;
206
207 if (test_params.magic0 != 0xdead ||
208 test_params.magic1 != 0xbeef)
209 return 0;
210
211 test_case = test_params.tcase;
212 if (test_case != 3)
213 key = bpf_get_prandom_u32();
214
215 if (test_case == 0) {
216 ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
217 } else if (test_case == 1) {
218 ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
219 BPF_ANY);
220 } else if (test_case == 2) {
221 void *nolocal_lru_map;
222 int cpu = bpf_get_smp_processor_id();
223
224 nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
225 &cpu);
226 if (!nolocal_lru_map) {
227 ret = -ENOENT;
228 goto done;
229 }
230
231 ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
232 BPF_ANY);
233 } else if (test_case == 3) {
234 u32 i;
235
236 key = test_params.key;
237
238 #pragma clang loop unroll(full)
239 for (i = 0; i < 32; i++) {
240 bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
241 key++;
242 }
243 } else {
244 ret = -EINVAL;
245 }
246
247 done:
248 if (ret)
249 bpf_trace_printk(fmt, sizeof(fmt), ret);
250
251 return 0;
252 }
253
SYSCALL(sys_gettid)254 SEC("kprobe/" SYSCALL(sys_gettid))
255 int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
256 {
257 union {
258 u32 b32[2];
259 u8 b8[8];
260 } key;
261 unsigned int i;
262
263 key.b32[0] = 32;
264 key.b8[4] = 192;
265 key.b8[5] = 168;
266 key.b8[6] = 0;
267 key.b8[7] = 1;
268
269 #pragma clang loop unroll(full)
270 for (i = 0; i < 32; ++i)
271 bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);
272
273 return 0;
274 }
275
SYSCALL(sys_getpgid)276 SEC("kprobe/" SYSCALL(sys_getpgid))
277 int stress_hash_map_lookup(struct pt_regs *ctx)
278 {
279 u32 key = 1, i;
280 long *value;
281
282 #pragma clang loop unroll(full)
283 for (i = 0; i < 64; ++i)
284 value = bpf_map_lookup_elem(&hash_map, &key);
285
286 return 0;
287 }
288
SYSCALL(sys_getppid)289 SEC("kprobe/" SYSCALL(sys_getppid))
290 int stress_array_map_lookup(struct pt_regs *ctx)
291 {
292 u32 key = 1, i;
293 long *value;
294
295 #pragma clang loop unroll(full)
296 for (i = 0; i < 64; ++i)
297 value = bpf_map_lookup_elem(&array_map, &key);
298
299 return 0;
300 }
301
/* Loadable BPF objects must declare a license; GPL matches the file header. */
char _license[] SEC("license") = "GPL";
/* Kernel version stamp for the loader's version check. */
u32 _version SEC("version") = LINUX_VERSION_CODE;
304