// SPDX-License-Identifier: GPL-2.0
/*
 * Check if we can migrate child sockets.
 *
 *   1. If reuse_md->migrating_sk is NULL (SYN packet),
 *      return SK_PASS without selecting a listener.
 *   2. If reuse_md->migrating_sk is not NULL (socket migration),
 *      select a listener (reuseport_map[migrate_map[cookie]]).
 *
 * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
 */

#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

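/* Listening sockets that child sockets can be migrated to, indexed by the
 * value stored in migrate_map; bpf_sk_select_reuseport() picks the target
 * listener out of this map.
 */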
struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 256);
	__type(key, int);
	__type(value, __u64);
} reuseport_map SEC(".maps");

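/* Keyed by a listener's socket cookie (the cookie of reuse_md->sk at
 * migration time); the value is the reuseport_map index of the listener
 * to migrate child sockets to.
 */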
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 256);
	__type(key, __u64);
	__type(value, int);
} migrate_map SEC(".maps");
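
/*
 * Rough sketch of how userspace might populate the maps and attach this
 * program (illustrative only: reuseport_map_fd, migrate_map_fd, listener_fd,
 * prog_fd and target_idx are assumed names, not part of this file).  A
 * userspace lookup on a REUSEPORT_SOCKARRAY returns the stored socket's
 * cookie, which is exactly the key migrate_map expects:
 *
 *	int idx = 0, target_idx = 1;
 *	__u64 value = (__u64)listener_fd;
 *
 *	bpf_map_update_elem(reuseport_map_fd, &idx, &value, BPF_NOEXIST);
 *	bpf_map_lookup_elem(reuseport_map_fd, &idx, &value);
 *	bpf_map_update_elem(migrate_map_fd, &value, &target_idx, BPF_NOEXIST);
 *	setsockopt(listener_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 */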

/* Per-migration-path counters bumped below and read by the userspace test,
 * plus the server port (network byte order) that drop_ack matches on.
 */
int migrated_at_close = 0;
int migrated_at_close_fastopen = 0;
int migrated_at_send_synack = 0;
int migrated_at_recv_ack = 0;
__be16 server_port;

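/* XDP program that drops TCP segments with ACK set but SYN clear (e.g. the
 * final ACK of the handshake) destined to server_port, so that connections
 * can be held in the request-socket stage while the test exercises
 * migration.
 */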
SEC("xdp")
int drop_ack(struct xdp_md *xdp)
{
	void *data_end = (void *)(long)xdp->data_end;
	void *data = (void *)(long)xdp->data;
	struct ethhdr *eth = data;
	struct tcphdr *tcp = NULL;

	if (eth + 1 > data_end)
		goto pass;

	switch (bpf_ntohs(eth->h_proto)) {
	case ETH_P_IP: {
		struct iphdr *ip = (struct iphdr *)(eth + 1);

		if (ip + 1 > data_end)
			goto pass;

		if (ip->protocol != IPPROTO_TCP)
			goto pass;

		tcp = (struct tcphdr *)((void *)ip + ip->ihl * 4);
		break;
	}
	case ETH_P_IPV6: {
		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(eth + 1);

		if (ipv6 + 1 > data_end)
			goto pass;

		if (ipv6->nexthdr != IPPROTO_TCP)
			goto pass;

		tcp = (struct tcphdr *)(ipv6 + 1);
		break;
	}
	default:
		goto pass;
	}

	if (tcp + 1 > data_end)
		goto pass;

	if (tcp->dest != server_port)
		goto pass;

	if (!tcp->syn && tcp->ack)
		return XDP_DROP;

pass:
	return XDP_PASS;
}

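/* SK_REUSEPORT program run both for normal listener selection and for socket
 * migration.  For a plain SYN (no migrating_sk) it passes without choosing a
 * listener; for migration it selects the target listener via
 * reuseport_map[migrate_map[cookie of reuse_md->sk]] and records at which
 * stage the child socket was migrated.
 */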
SEC("sk_reuseport/migrate")
int migrate_reuseport(struct sk_reuseport_md *reuse_md)
{
	int *key, flags = 0, state, err;
	__u64 cookie;

	if (!reuse_md->migrating_sk)
		return SK_PASS;

	state = reuse_md->migrating_sk->state;
	cookie = bpf_get_socket_cookie(reuse_md->sk);

	key = bpf_map_lookup_elem(&migrate_map, &cookie);
	if (!key)
		return SK_DROP;

	err = bpf_sk_select_reuseport(reuse_md, &reuseport_map, key, flags);
	if (err)
		return SK_PASS;

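	/* Count the migration by the state of the migrating child socket:
	 * ESTABLISHED and SYN_RECV (TFO) children are migrated when their
	 * listener closes, while NEW_SYN_RECV request sockets are migrated
	 * either with no packet attached (reuse_md->len == 0, the SYN+ACK
	 * transmission path) or on receipt of the final ACK.
	 */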
	switch (state) {
	case BPF_TCP_ESTABLISHED:
		__sync_fetch_and_add(&migrated_at_close, 1);
		break;
	case BPF_TCP_SYN_RECV:
		__sync_fetch_and_add(&migrated_at_close_fastopen, 1);
		break;
	case BPF_TCP_NEW_SYN_RECV:
		if (!reuse_md->len)
			__sync_fetch_and_add(&migrated_at_send_synack, 1);
		else
			__sync_fetch_and_add(&migrated_at_recv_ack, 1);
		break;
	}

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";