/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io */
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Sockmap sample program connects a client and a backend together
 * using cgroups.
 *
 *    client:X <---> frontend:80 client:X <---> backend:80
 *
 * For simplicity we hard-code values here and bind 1:1. The hard-coded
 * values are part of the setup in the sockmap.sh script associated
 * with this BPF program.
 *
 * The bpf_printk output is verbose and prints information as
 * connections are established and verdicts are decided.
 */

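/* TEST_MAP_TYPE and SOCKMAP are not defined here; they are expected to
 * come from whatever includes or builds this file, so the same program
 * bodies can target either BPF_MAP_TYPE_SOCKMAP (SOCKMAP defined) or
 * BPF_MAP_TYPE_SOCKHASH (SOCKMAP undefined). That is an inference from
 * the #ifdef SOCKMAP blocks below, not something this file spells out.
 *
 * The user-space side (the sockmap.sh harness mentioned above) loads
 * and attaches these objects. A rough, illustrative sketch of that
 * side using standard libbpf calls -- not code from this file:
 *
 *      bpf_prog_attach(verdict_fd, sock_map_fd,
 *                      BPF_SK_SKB_STREAM_VERDICT, 0);
 *      bpf_prog_attach(sockops_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
 */
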
struct {
        __uint(type, TEST_MAP_TYPE);
        __uint(max_entries, 20);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} sock_map SEC(".maps");

struct {
        __uint(type, TEST_MAP_TYPE);
        __uint(max_entries, 20);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} sock_map_txmsg SEC(".maps");

struct {
        __uint(type, TEST_MAP_TYPE);
        __uint(max_entries, 20);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} sock_map_redir SEC(".maps");

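/* The array maps below are control knobs, presumably filled in by the
 * user-space test harness; this file only reads them:
 *
 *   sock_apply_bytes - byte count passed to bpf_msg_apply_bytes()
 *   sock_cork_bytes  - byte count passed to bpf_msg_cork_bytes()
 *   sock_bytes       - six slots, read in pairs as the arguments for
 *                      bpf_msg_pull_data(), bpf_msg_push_data() and
 *                      bpf_msg_pop_data()
 *   sock_redir_flags - non-zero value switches bpf_prog6 to key 2 and
 *                      is passed on as the redirect flags
 *   sock_skb_opts    - per-test options for the sk_skb programs
 */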
struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} sock_apply_bytes SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} sock_cork_bytes SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 6);
        __type(key, int);
        __type(value, int);
} sock_bytes SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} sock_redir_flags SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 3);
        __type(key, int);
        __type(value, int);
} sock_skb_opts SEC(".maps");

struct {
        __uint(type, TEST_MAP_TYPE);
        __uint(max_entries, 20);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");

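/* Returns the full skb length, or a non-zero override from slot 2 of
 * sock_skb_opts; returning a length like this is the usual job of the
 * stream parser program in the sockmap tests.
 */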
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
        int *f, two = 2;

        f = bpf_map_lookup_elem(&sock_skb_opts, &two);
        if (f && *f)
                return *f;
        return skb->len;
}

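/* sk_skb verdict: choose a redirect key from the local port (10000 maps
 * to key 10, anything else to key 1), optionally overridden to key 3
 * plus flags by slot 0 of sock_skb_opts, then redirect into sock_map
 * (or its sockhash flavor).
 */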
SEC("sk_skb2")
int bpf_prog2(struct __sk_buff *skb)
{
        __u32 lport = skb->local_port;
        __u32 rport = skb->remote_port;
        int len, *f, ret, zero = 0;
        __u64 flags = 0;

        if (lport == 10000)
                ret = 10;
        else
                ret = 1;

        len = (__u32)skb->data_end - (__u32)skb->data;
        f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
        if (f && *f) {
                ret = 3;
                flags = *f;
        }

#ifdef SOCKMAP
        return bpf_sk_redirect_map(skb, &sock_map, ret, flags);
#else
        return bpf_sk_redirect_hash(skb, &sock_map, &ret, flags);
#endif
}

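/* Make sure at least 6 + offset bytes are linear, then overwrite four
 * bytes at the given offset with "PASS".
 */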
static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
{
        int err = bpf_skb_pull_data(skb, 6 + offset);
        void *data_end;
        char *c;

        if (err)
                return;

        c = (char *)(long)skb->data;
        data_end = (void *)(long)skb->data_end;

        if (c + 5 + offset < data_end)
                memcpy(c + offset, "PASS", 4);
}

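/* sk_skb verdict feeding tls_sock_map: when slot 1 of sock_skb_opts is
 * set, shrink the skb by 13 bytes, grow it by 4, write "PASS" at the
 * start and redirect into tls_sock_map; otherwise grow by 4 bytes,
 * write "PASS" at offset 13 and return the verdict.
 */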
SEC("sk_skb3")
int bpf_prog3(struct __sk_buff *skb)
{
        int err, *f, ret = SK_PASS;
        const int one = 1;

        f = bpf_map_lookup_elem(&sock_skb_opts, &one);
        if (f && *f) {
                __u64 flags = 0;

                ret = 0;
                flags = *f;

                err = bpf_skb_adjust_room(skb, -13, 0, 0);
                if (err)
                        return SK_DROP;
                err = bpf_skb_adjust_room(skb, 4, 0, 0);
                if (err)
                        return SK_DROP;
                bpf_write_pass(skb, 0);
#ifdef SOCKMAP
                return bpf_sk_redirect_map(skb, &tls_sock_map, ret, flags);
#else
                return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
        }
        f = bpf_map_lookup_elem(&sock_skb_opts, &one);
        if (f && *f)
                ret = SK_DROP;
        err = bpf_skb_adjust_room(skb, 4, 0, 0);
        if (err)
                return SK_DROP;
        bpf_write_pass(skb, 13);
tls_out:
        return ret;
}

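/* sockops: on a passive established connection with local port 10000
 * add the socket to sock_map at key 1; on an active established
 * connection to remote port 10001 add it at key 10. These are the
 * hard-coded ports from the setup described at the top of the file.
 */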
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
        __u32 lport, rport;
        int op, err = 0, ret;

        op = (int) skops->op;

        switch (op) {
        case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
                lport = skops->local_port;
                rport = skops->remote_port;

                if (lport == 10000) {
                        ret = 1;
#ifdef SOCKMAP
                        err = bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
#else
                        err = bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
#endif
                }
                break;
        case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
                lport = skops->local_port;
                rport = skops->remote_port;

                if (bpf_ntohl(rport) == 10001) {
                        ret = 10;
#ifdef SOCKMAP
                        err = bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
#else
                        err = bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
#endif
                }
                break;
        default:
                break;
        }

        return 0;
}

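/* sk_msg: apply/cork byte limits and pull/push/pop msg data according
 * to the control maps, then pass; only a failed bpf_msg_push_data()
 * drops.
 */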
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
        int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
        int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;

        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes)
                bpf_msg_apply_bytes(msg, *bytes);
        bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
        if (bytes)
                bpf_msg_cork_bytes(msg, *bytes);
        start = bpf_map_lookup_elem(&sock_bytes, &zero);
        end = bpf_map_lookup_elem(&sock_bytes, &one);
        if (start && end)
                bpf_msg_pull_data(msg, *start, *end, 0);
        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
        if (start_push && end_push) {
                err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
                if (err)
                        return SK_DROP;
        }
        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
        if (start_pop && pop)
                bpf_msg_pop_data(msg, *start_pop, *pop, 0);
        return SK_PASS;
}

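/* Same byte accounting and data manipulation as bpf_prog4, but the msg
 * is finally redirected into sock_map_redir; a non-zero value in
 * sock_redir_flags switches the key to 2 and is passed on as the
 * redirect flags.
 */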
SEC("sk_msg2")
int bpf_prog6(struct sk_msg_md *msg)
{
        int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
        int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
        int err = 0;
        __u64 flags = 0;

        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes)
                bpf_msg_apply_bytes(msg, *bytes);
        bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
        if (bytes)
                bpf_msg_cork_bytes(msg, *bytes);

        start = bpf_map_lookup_elem(&sock_bytes, &zero);
        end = bpf_map_lookup_elem(&sock_bytes, &one);
        if (start && end)
                bpf_msg_pull_data(msg, *start, *end, 0);

        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
        if (start_push && end_push) {
                err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
                if (err)
                        return SK_DROP;
        }

        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
        if (start_pop && pop)
                bpf_msg_pop_data(msg, *start_pop, *pop, 0);

        f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
        if (f && *f) {
                key = 2;
                flags = *f;
        }
#ifdef SOCKMAP
        return bpf_msg_redirect_map(msg, &sock_map_redir, key, flags);
#else
        return bpf_msg_redirect_hash(msg, &sock_map_redir, &key, flags);
#endif
}

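/* Drops the msg unless sock_apply_bytes is populated and
 * bpf_msg_apply_bytes() succeeds.
 */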
SEC("sk_msg3")
int bpf_prog8(struct sk_msg_md *msg)
{
        void *data_end = (void *)(long) msg->data_end;
        void *data = (void *)(long) msg->data;
        int ret = 0, *bytes, zero = 0;

        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes) {
                ret = bpf_msg_apply_bytes(msg, *bytes);
                if (ret)
                        return SK_DROP;
        } else {
                return SK_DROP;
        }
        return SK_PASS;
}
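
/* Passes straight away once the msg already holds at least
 * sock_cork_bytes bytes; otherwise asks to cork up to that amount and
 * drops if the helper fails.
 */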
SEC("sk_msg4")
int bpf_prog9(struct sk_msg_md *msg)
{
        void *data_end = (void *)(long) msg->data_end;
        void *data = (void *)(long) msg->data;
        int ret = 0, *bytes, zero = 0;

        bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
        if (bytes) {
                if (((__u64)data_end - (__u64)data) >= *bytes)
                        return SK_PASS;
                ret = bpf_msg_cork_bytes(msg, *bytes);
                if (ret)
                        return SK_DROP;
        }
        return SK_PASS;
}

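/* Mirror image of bpf_prog4: the default verdict is SK_DROP and a
 * failed bpf_msg_push_data() returns SK_PASS, so the drop paths get
 * exercised as well.
 */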
SEC("sk_msg5")
int bpf_prog10(struct sk_msg_md *msg)
{
        int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
        int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;

        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes)
                bpf_msg_apply_bytes(msg, *bytes);
        bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
        if (bytes)
                bpf_msg_cork_bytes(msg, *bytes);
        start = bpf_map_lookup_elem(&sock_bytes, &zero);
        end = bpf_map_lookup_elem(&sock_bytes, &one);
        if (start && end)
                bpf_msg_pull_data(msg, *start, *end, 0);
        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
        if (start_push && end_push) {
                err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
                if (err)
                        return SK_PASS;
        }
        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
        if (start_pop && pop)
                bpf_msg_pop_data(msg, *start_pop, *pop, 0);
        return SK_DROP;
}

char _license[] SEC("license") = "GPL";