// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

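/*
 * Allocate one worker per possible CPU, each initialized to run
 * @function with @ptr as its context argument. Returns NULL if the
 * percpu allocation fails.
 */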
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

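/*
 * Initialize a crypt_queue: a ptr_ring of @len slots, serviced by the
 * per-CPU workers allocated above. Setting last_cpu to -1 marks the
 * round-robin CPU selection (handled in queueing.h) as not yet started.
 */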
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

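/*
 * Free a crypt_queue. With @purge set, any skbs still sitting in the
 * ring are destroyed; without it, the ring is expected to already be
 * empty, and a leftover entry triggers a warning.
 */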
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

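/*
 * The prev_queue below is a lock-free, bounded, multi-producer,
 * single-consumer queue of skbs, singly linked through each skb's own
 * prev pointer (aliased as NEXT()), so no per-node memory is allocated.
 * The list is never structurally empty: the queue's embedded @empty
 * member serves as a stub node, laid out to match the first two members
 * of struct sk_buff, which the BUILD_BUG_ON() in wg_prev_queue_init()
 * verifies.
 */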
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

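/* Begin with head and tail both pointing at the stub, and no packets. */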
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							 offsetof(struct prev_queue, empty));
}

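/*
 * Producer side: swing @queue->head to the new skb with a release
 * exchange, then link the previous head to it. Between those two steps
 * the chain is momentarily broken, which the consumer must tolerate
 * (see wg_prev_queue_dequeue()).
 */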
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

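/*
 * Bounded enqueue: returns false, without queueing, once
 * MAX_QUEUED_PACKETS skbs are already in flight.
 */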
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

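/*
 * Consumer side; only one context may dequeue at a time. The stub is
 * skipped over when it sits at the tail. If tail has a visible
 * successor, tail is advanced and returned. A NULL next while
 * tail != head means a producer has swung head but not yet published
 * its link, so the queue momentarily reads as empty. Otherwise tail is
 * the last real node: the stub is re-enqueued behind it, and the link
 * is checked once more before giving up.
 */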
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}
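
/*
 * A minimal usage sketch, not part of this file; the field and helper
 * names below are hypothetical. Any number of producers may enqueue
 * concurrently, while the dequeue loop must stay in a single context:
 *
 *	// Producer (any context):
 *	if (!wg_prev_queue_enqueue(&peer->staged_queue, skb))
 *		kfree_skb(skb); // queue full, drop
 *
 *	// Consumer (one context only); NULL can also mean a producer is
 *	// mid-enqueue, so callers are expected to be rescheduled later:
 *	while ((skb = wg_prev_queue_dequeue(&peer->staged_queue)) != NULL)
 *		process(skb);
 */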

#undef NEXT
#undef STUB