/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* This whole API has been deprecated and moved to libxdp, which can be found
 * at https://github.com/xdp-project/xdp-tools. The APIs are exactly the same,
 * so switching should only require linking with libxdp instead of libbpf for
 * this set of functionality. If that is not the case, please submit a bug
 * report on the aforementioned page.
 */
/* Load-Acquire and Store-Release barriers used by the XDP socket
 * library. The following macros should *NOT* be considered part of
 * the xsk.h API, and are subject to change at any time.
 *
 * LIBRARY INTERNAL
 */

#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v) \
        do { \
                asm volatile("" : : : "memory"); \
                __XSK_WRITE_ONCE(*p, v); \
        } while (0)
# define libbpf_smp_load_acquire(p) \
        ({ \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
                asm volatile("" : : : "memory"); \
                ___p1; \
        })
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v) \
        asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p) \
        ({ \
                typeof(*p) ___p1; \
                asm volatile ("ldar %w0, %1" \
                              : "=r" (___p1) : "Q" (*p) : "memory"); \
                ___p1; \
        })
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v) \
        do { \
                asm volatile ("fence rw,w" : : : "memory"); \
                __XSK_WRITE_ONCE(*p, v); \
        } while (0)
# define libbpf_smp_load_acquire(p) \
        ({ \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
                asm volatile ("fence r,rw" : : : "memory"); \
                ___p1; \
        })
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v) \
        do { \
                __sync_synchronize(); \
                __XSK_WRITE_ONCE(*p, v); \
        } while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p) \
        ({ \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
                __sync_synchronize(); \
                ___p1; \
        })
#endif

/* LIBRARY INTERNAL -- END */
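
/* Illustrative sketch (not part of the API) of how the barriers above pair
 * across the two sides of a ring: the producer must publish its descriptor
 * writes before bumping the producer index, and the consumer must observe
 * the index before reading the descriptors it covers. prod_idx, desc, mask
 * and cons_idx are hypothetical names for shared ring state:
 *
 *        // producer
 *        desc[*prod_idx & mask] = my_desc;
 *        libbpf_smp_store_release(prod_idx, *prod_idx + 1);
 *
 *        // consumer
 *        __u32 prod = libbpf_smp_load_acquire(prod_idx);
 *        while (cons_idx != prod)
 *                my_handle_desc(desc[cons_idx++ & mask]); // hypothetical
 */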

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
        __u32 cached_prod; \
        __u32 cached_cons; \
        __u32 mask; \
        __u32 size; \
        __u32 *producer; \
        __u32 *consumer; \
        void *ring; \
        __u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation of the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h in the kernel sources.
 */
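
/* The ring sizes are powers of two, so mask == size - 1 and a free-running
 * 32-bit index is wrapped onto the ring with a simple AND: with size == 2048,
 * index 2050 maps to slot 2050 & 2047 == 2. The accessors below rely on this.
 */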

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
                                              __u32 idx)
{
        __u64 *addrs = (__u64 *)fill->ring;

        return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
        const __u64 *addrs = (const __u64 *)comp->ring;

        return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
                                                      __u32 idx)
{
        struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

        return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
        const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

        return &descs[idx & rx->mask];
}
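
/* Illustrative sketch of replenishing the fill ring with the accessor above
 * plus xsk_ring_prod__reserve()/xsk_ring_prod__submit() defined further down.
 * fill and nb_frames are assumptions, and the frames handed to the kernel are
 * assumed to start at umem offset 0 and be otherwise unused:
 *
 *        __u32 idx, i;
 *
 *        if (xsk_ring_prod__reserve(&fill, nb_frames, &idx) == nb_frames) {
 *                for (i = 0; i < nb_frames; i++)
 *                        *xsk_ring_prod__fill_addr(&fill, idx + i) =
 *                                (__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *                xsk_ring_prod__submit(&fill, nb_frames);
 *        }
 */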

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
        return *r->flags & XDP_RING_NEED_WAKEUP;
}
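
/* When the socket is bound with the XDP_USE_NEED_WAKEUP flag, the kernel may
 * park a ring until it is explicitly kicked. A minimal sketch for the TX
 * path, assuming xsk is a bound struct xsk_socket * and <sys/socket.h> is
 * included:
 *
 *        if (xsk_ring_prod__needs_wakeup(&tx))
 *                sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */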

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
        __u32 free_entries = r->cached_cons - r->cached_prod;

        if (free_entries >= nb)
                return free_entries;

        /* Refresh the local tail pointer.
         * cached_cons is r->size bigger than the real consumer pointer so
         * that this addition can be avoided in the more frequently
         * executed code that computes free_entries at the beginning of
         * this function. Without this optimization it would have been
         * free_entries = r->cached_cons - r->cached_prod + r->size.
         */
        r->cached_cons = libbpf_smp_load_acquire(r->consumer);
        r->cached_cons += r->size;

        return r->cached_cons - r->cached_prod;
}
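
/* Worked example of the optimization above: with size == 8, a real consumer
 * index of 3 and a producer index of 5, cached_cons is kept at 3 + 8 == 11,
 * so free_entries == 11 - 5 == 6 on the fast path, the same result as
 * 3 - 5 + 8 == 6 but without the extra addition.
 */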

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
        __u32 entries = r->cached_prod - r->cached_cons;

        if (entries == 0) {
                r->cached_prod = libbpf_smp_load_acquire(r->producer);
                entries = r->cached_prod - r->cached_cons;
        }

        return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
        if (xsk_prod_nb_free(prod, nb) < nb)
                return 0;

        *idx = prod->cached_prod;
        prod->cached_prod += nb;

        return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
        /* Make sure everything has been written to the ring before indicating
         * this to the kernel by writing the producer pointer.
         */
        libbpf_smp_store_release(prod->producer, *prod->producer + nb);
}
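
/* Illustrative TX sketch pairing reserve with submit (my_frame_addr and
 * my_frame_len are hypothetical; error handling omitted):
 *
 *        __u32 idx;
 *
 *        if (xsk_ring_prod__reserve(&tx, 1, &idx) == 1) {
 *                struct xdp_desc *desc = xsk_ring_prod__tx_desc(&tx, idx);
 *
 *                desc->addr = my_frame_addr; // offset into the umem
 *                desc->len = my_frame_len;   // payload length in bytes
 *                xsk_ring_prod__submit(&tx, 1);
 *                if (xsk_ring_prod__needs_wakeup(&tx))
 *                        sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *                               NULL, 0);
 *        }
 */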

static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
        __u32 entries = xsk_cons_nb_avail(cons, nb);

        if (entries > 0) {
                *idx = cons->cached_cons;
                cons->cached_cons += entries;
        }

        return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
        cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
        /* Make sure data has been read before indicating we are done
         * with the entries by updating the consumer pointer.
         */
        libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
}
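
/* Illustrative RX sketch pairing peek with release. The batch size of 64 and
 * my_process_packet() are assumptions, and xsk_umem__get_data() is defined
 * just below:
 *
 *        __u32 idx, i;
 *        __u32 rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *
 *        for (i = 0; i < rcvd; i++) {
 *                const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
 *                void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *                my_process_packet(pkt, desc->len); // hypothetical handler
 *        }
 *        if (rcvd)
 *                xsk_ring_cons__release(&rx, rcvd);
 */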

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
        return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
        return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
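
/* In unaligned chunk mode (XDP_UMEM_UNALIGNED_CHUNK_FLAG), descriptor
 * addresses carry the offset into the chunk in their upper bits, which the
 * helpers above decode. A minimal sketch, where addr came from a completion
 * or RX descriptor:
 *
 *        __u64 base = xsk_umem__extract_addr(addr);   // chunk start
 *        __u64 off = xsk_umem__extract_offset(addr);  // offset within chunk
 *        void *pkt = xsk_umem__get_data(umem_area, base + off);
 *        // equivalent in one step:
 *        void *pkt2 = xsk_umem__get_data(umem_area,
 *                                        xsk_umem__add_offset_to_addr(addr));
 */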

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS          0

struct xsk_umem_config {
        __u32 fill_size;
        __u32 comp_size;
        __u32 frame_size;
        __u32 frame_headroom;
        __u32 flags;
};
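
/* A configuration built only from the defaults above; passing it to
 * xsk_umem__create() below is equivalent to passing a NULL config:
 *
 *        const struct xsk_umem_config cfg = {
 *                .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *                .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *                .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *                .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *                .flags = XSK_UMEM__DEFAULT_FLAGS,
 *        };
 */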

int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd);
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
        __u32 rx_size;
        __u32 tx_size;
        __u32 libbpf_flags;
        __u32 xdp_flags;
        __u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
                     void *umem_area, __u64 size,
                     struct xsk_ring_prod *fill,
                     struct xsk_ring_cons *comp,
                     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
                       const char *ifname, __u32 queue_id,
                       struct xsk_umem *umem,
                       struct xsk_ring_cons *rx,
                       struct xsk_ring_prod *tx,
                       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                              const char *ifname,
                              __u32 queue_id, struct xsk_umem *umem,
                              struct xsk_ring_cons *rx,
                              struct xsk_ring_prod *tx,
                              struct xsk_ring_prod *fill,
                              struct xsk_ring_cons *comp,
                              const struct xsk_socket_config *config);
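
/* Illustrative end-to-end setup sketch. The interface name "eth0", queue 0,
 * the 4096-frame umem size and the use of MAP_ANONYMOUS memory are all
 * assumptions; error handling is omitted:
 *
 *        #include <sys/mman.h>
 *
 *        struct xsk_umem *umem;
 *        struct xsk_socket *xsk;
 *        struct xsk_ring_prod fill, tx;
 *        struct xsk_ring_cons comp, rx;
 *        __u64 size = 4096ULL * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *        void *area = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *        xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
 *        xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 *        // ... run the datapath ...
 *        xsk_socket__delete(xsk);
 *        xsk_umem__delete(umem);
 */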

/* Returns 0 for success and -EBUSY if the umem is still in use. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __XSK_H */