// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "device.h"
#include "peer.h"
#include "timers.h"
#include "messages.h"
#include "cookie.h"
#include "socket.h"

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
	struct pcpu_sw_netstats *tstats =
		get_cpu_ptr(peer->device->dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	++tstats->rx_packets;
	tstats->rx_bytes += len;
	peer->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
	put_cpu_ptr(tstats);
}

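/* Reads the 32-bit message type straight off the packet. The value stays in
 * wire (little-endian) byte order, so callers compare it against
 * cpu_to_le32() constants, which the compiler folds at build time, rather
 * than byte-swapping every incoming packet's header at run time.
 */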
#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)

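/* Returns the length of the type-specific header if the packet's declared
 * type is consistent with its length (data packets may carry a payload, so
 * only a minimum length is enforced; handshake messages are fixed-size), or
 * 0 if the packet is malformed.
 */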
static size_t validate_header_len(struct sk_buff *skb)
{
	if (unlikely(skb->len < sizeof(struct message_header)))
		return 0;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
	    skb->len >= MESSAGE_MINIMUM_LENGTH)
		return sizeof(struct message_data);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
	    skb->len == sizeof(struct message_handshake_initiation))
		return sizeof(struct message_handshake_initiation);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
	    skb->len == sizeof(struct message_handshake_response))
		return sizeof(struct message_handshake_response);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
	    skb->len == sizeof(struct message_handshake_cookie))
		return sizeof(struct message_handshake_cookie);
	return 0;
}

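/* Validates the IP and UDP framing of the incoming packet, trims any
 * trailing garbage beyond the UDP-reported length, and leaves skb->data
 * pointing at the WireGuard message itself, with at least the full message
 * header linear and readable. Returns 0 on success or -EINVAL on any
 * inconsistency.
 */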
static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
{
	size_t data_offset, data_len, header_len;
	struct udphdr *udp;

	if (unlikely(!wg_check_packet_protocol(skb) ||
		     skb_transport_header(skb) < skb->head ||
		     (skb_transport_header(skb) + sizeof(struct udphdr)) >
			     skb_tail_pointer(skb)))
		return -EINVAL; /* Bogus IP header */
	udp = udp_hdr(skb);
	data_offset = (u8 *)udp - skb->data;
	if (unlikely(data_offset > U16_MAX ||
		     data_offset + sizeof(struct udphdr) > skb->len))
		/* Packet has offset at impossible location or isn't big enough
		 * to have UDP fields.
		 */
		return -EINVAL;
	data_len = ntohs(udp->len);
	if (unlikely(data_len < sizeof(struct udphdr) ||
		     data_len > skb->len - data_offset))
		/* The UDP header reports a length that is either too small
		 * to contain the header itself or larger than the packet
		 * actually is.
		 */
		return -EINVAL;
	data_len -= sizeof(struct udphdr);
	data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
	if (unlikely(!pskb_may_pull(skb,
				data_offset + sizeof(struct message_header)) ||
		     pskb_trim(skb, data_len + data_offset) < 0))
		return -EINVAL;
	skb_pull(skb, data_offset);
	if (unlikely(skb->len != data_len))
		/* Final len does not agree with calculated len */
		return -EINVAL;
	header_len = validate_header_len(skb);
	if (unlikely(!header_len))
		return -EINVAL;
	__skb_push(skb, data_offset);
	if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
		return -EINVAL;
	__skb_pull(skb, data_offset);
	return 0;
}

static void wg_receive_handshake_packet(struct wg_device *wg,
					struct sk_buff *skb)
{
	enum cookie_mac_state mac_state;
	struct wg_peer *peer = NULL;
	/* This is global, so that our load calculation applies to the whole
	 * system. We don't care about races with it at all.
	 */
	static u64 last_under_load;
	bool packet_needs_cookie;
	bool under_load;

	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
		net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
					wg->dev->name, skb);
		wg_cookie_message_consume(
			(struct message_handshake_cookie *)skb->data, wg);
		return;
	}

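	/* We consider ourselves under load if the handshake queue is at
	 * least an eighth full. To avoid flapping, we then stay in the
	 * under-load state until a full second has passed below that
	 * threshold.
	 */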
	under_load = atomic_read(&wg->handshake_queue_len) >=
			MAX_QUEUED_INCOMING_HANDSHAKES / 8;
	if (under_load) {
		last_under_load = ktime_get_coarse_boottime_ns();
	} else if (last_under_load) {
		under_load = !wg_birthdate_has_expired(last_under_load, 1);
		if (!under_load)
			last_under_load = 0;
	}
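	/* A packet may proceed if it carries a valid mac1 while we're not
	 * under load, or a valid cookie mac (mac2) while we are. A valid
	 * mac1 without a cookie while under load means we reply with a
	 * cookie and make the initiator retry; anything else is dropped.
	 */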
	mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
					      under_load);
	if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
	    (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
		packet_needs_cookie = false;
	} else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
		packet_needs_cookie = true;
	} else {
		net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
					wg->dev->name, skb);
		return;
	}

	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
		struct message_handshake_initiation *message =
			(struct message_handshake_initiation *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_initiation(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		wg_packet_send_handshake_response(peer);
		break;
	}
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
		struct message_handshake_response *message =
			(struct message_handshake_response *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_response(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		if (wg_noise_handshake_begin_session(&peer->handshake,
						     &peer->keypairs)) {
			wg_timers_session_derived(peer);
			wg_timers_handshake_complete(peer);
			/* Calling this function will either send any
			 * existing packets in the queue without a keepalive,
			 * which is the best case, or, if the queue is empty,
			 * send a keepalive in order to give immediate
			 * confirmation of the session.
			 */
			wg_packet_send_keepalive(peer);
		}
		break;
	}
	}

	if (unlikely(!peer)) {
		WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
		return;
	}

	local_bh_disable();
	update_rx_stats(peer, skb->len);
	local_bh_enable();

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);
	wg_peer_put(peer);
}

void wg_packet_handshake_receive_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
	struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		wg_receive_handshake_packet(wg, skb);
		dev_kfree_skb(skb);
		atomic_dec(&wg->handshake_queue_len);
		cond_resched();
	}
}

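/* If we initiated the current keypair and it is nearing the end of its
 * lifetime (REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT after
 * creation), proactively kick off a fresh handshake, so the tunnel never
 * stalls waiting to rekey. Only the initiator does this, which keeps the
 * two sides from racing to rekey at the same moment.
 */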
static void keep_key_fresh(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	bool send;

	if (peer->sent_lastminute_handshake)
		return;

	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
	send = keypair && READ_ONCE(keypair->sending.is_valid) &&
	       keypair->i_am_the_initiator &&
	       wg_birthdate_has_expired(keypair->sending.birthdate,
			REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
	rcu_read_unlock_bh();

	if (unlikely(send)) {
		peer->sent_lastminute_handshake = true;
		wg_packet_send_queued_handshake_initiation(peer, false);
	}
}

static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct sk_buff *trailer;
	unsigned int offset;
	int num_frags;

	if (unlikely(!keypair))
		return false;

	if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
		  wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
		  keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
		WRITE_ONCE(keypair->receiving.is_valid, false);
		return false;
	}

	PACKET_CB(skb)->nonce =
		le64_to_cpu(((struct message_data *)skb->data)->counter);
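	/* Note that the replay counter is not checked here; that happens
	 * later, in wg_packet_rx_poll(), once packets have been put back
	 * into received order by the per-peer queue.
	 */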

	/* We ensure that the network header is part of the packet before we
	 * call skb_cow_data, so that there's no chance that data is removed
	 * from the skb which we will later need in order to extract the
	 * original endpoint.
	 */
	offset = skb->data - skb_network_header(skb);
	skb_push(skb, offset);
	num_frags = skb_cow_data(skb, 0, &trailer);
	offset += sizeof(struct message_data);
	skb_pull(skb, offset);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
		return false;

	if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
						 PACKET_CB(skb)->nonce,
						 keypair->receiving.key))
		return false;

	/* Another ugly situation of pushing and pulling the header so as to
	 * keep endpoint information intact.
	 */
	skb_push(skb, offset);
	if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
		return false;
	skb_pull(skb, offset);

	return true;
}

/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
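/* The window is a ring of COUNTER_BITS_TOTAL bits stored as an array of
 * unsigned longs: counter value N occupies bit N % BITS_PER_LONG of word
 * (N / BITS_PER_LONG) % (COUNTER_BITS_TOTAL / BITS_PER_LONG). When the
 * maximum seen counter advances, only the words newly brought into the
 * window are zeroed, so the bitmap never needs to be shifted wholesale.
 */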
static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
	unsigned long index, index_current, top, i;
	bool ret = false;

	spin_lock_bh(&counter->lock);

	if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
		     their_counter >= REJECT_AFTER_MESSAGES))
		goto out;

	++their_counter;

	if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
		     counter->counter))
		goto out;

	index = their_counter >> ilog2(BITS_PER_LONG);

	if (likely(their_counter > counter->counter)) {
		index_current = counter->counter >> ilog2(BITS_PER_LONG);
		top = min_t(unsigned long, index - index_current,
			    COUNTER_BITS_TOTAL / BITS_PER_LONG);
		for (i = 1; i <= top; ++i)
			counter->backtrack[(i + index_current) &
				((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
		counter->counter = their_counter;
	}

	index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
	ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
				&counter->backtrack[index]);

out:
	spin_unlock_bh(&counter->lock);
	return ret;
}

#include "selftest/counter.c"

static void wg_packet_consume_data_done(struct wg_peer *peer,
					struct sk_buff *skb,
					struct endpoint *endpoint)
{
	struct net_device *dev = peer->device->dev;
	unsigned int len, len_before_trim;
	struct wg_peer *routed_peer;

	wg_socket_set_peer_endpoint(peer, endpoint);

	if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
						    PACKET_CB(skb)->keypair))) {
		wg_timers_handshake_complete(peer);
		wg_packet_send_staged_packets(peer);
	}

	keep_key_fresh(peer);

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);

	/* A packet with length 0 is a keepalive packet */
	if (unlikely(!skb->len)) {
		update_rx_stats(peer, message_data_len(0));
		net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
				    dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		goto packet_processed;
	}

	wg_timers_data_received(peer);

	if (unlikely(skb_network_header(skb) < skb->head))
		goto dishonest_packet_size;
	if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
		       (ip_hdr(skb)->version == 4 ||
			(ip_hdr(skb)->version == 6 &&
			 pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
		goto dishonest_packet_type;

	skb->dev = dev;
	/* We've already verified the Poly1305 auth tag, which means this packet
	 * was not modified in transit. We can therefore tell the networking
	 * stack that all checksums of every layer of encapsulation have already
	 * been checked "by the hardware", and so it is unnecessary to check
	 * them again in software.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ~0; /* All levels */
	skb->protocol = ip_tunnel_parse_protocol(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		len = ntohs(ip_hdr(skb)->tot_len);
		if (unlikely(len < sizeof(struct iphdr)))
			goto dishonest_packet_size;
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		len = ntohs(ipv6_hdr(skb)->payload_len) +
		      sizeof(struct ipv6hdr);
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
	} else {
		goto dishonest_packet_type;
	}

	if (unlikely(len > skb->len))
		goto dishonest_packet_size;
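	/* Trim off any payload padding, but remember the pre-trim length,
	 * so the stats below account for what was actually carried on the
	 * wire.
	 */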
	len_before_trim = skb->len;
	if (unlikely(pskb_trim(skb, len)))
		goto packet_processed;

	routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
					       skb);
	wg_peer_put(routed_peer); /* We don't need the extra reference. */

	if (unlikely(routed_peer != peer))
		goto dishonest_packet_peer;

	napi_gro_receive(&peer->napi, skb);
	update_rx_stats(peer, message_data_len(len_before_trim));
	return;

dishonest_packet_peer:
	net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
				dev->name, skb, peer->internal_id,
				&peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_type:
	net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_size:
	net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_length_errors;
	goto packet_processed;
packet_processed:
	dev_kfree_skb(skb);
}

int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
	struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
	struct noise_keypair *keypair;
	struct endpoint endpoint;
	enum packet_state state;
	struct sk_buff *skb;
	int work_done = 0;
	bool free;

	if (unlikely(budget <= 0))
		return 0;

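	/* Packets are handed back in the order in which they arrived; we
	 * stop at the first one that a decryption worker has not yet
	 * finished with (still PACKET_STATE_UNCRYPTED), preserving in-order
	 * delivery.
	 */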
	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
		       PACKET_STATE_UNCRYPTED) {
		wg_prev_queue_drop_peeked(&peer->rx_queue);
		keypair = PACKET_CB(skb)->keypair;
		free = true;

		if (unlikely(state != PACKET_STATE_CRYPTED))
			goto next;

		if (unlikely(!counter_validate(&keypair->receiving_counter,
					       PACKET_CB(skb)->nonce))) {
			net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
					    peer->device->dev->name,
					    PACKET_CB(skb)->nonce,
					    keypair->receiving_counter.counter);
			goto next;
		}

		if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
			goto next;

		wg_reset_packet(skb, false);
		wg_packet_consume_data_done(peer, skb, &endpoint);
		free = false;

next:
		wg_noise_keypair_put(keypair, false);
		wg_peer_put(peer);
		if (unlikely(free))
			dev_kfree_skb(skb);

		if (++work_done >= budget)
			break;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

void wg_packet_decrypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		enum packet_state state =
			likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
		wg_queue_enqueue_per_peer_rx(skb, state);
		if (need_resched())
			cond_resched();
	}
}

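/* Looks up the receiving keypair from the 32-bit receiver index carried in
 * the data message, takes references on the keypair and its peer, and hands
 * the skb off to the parallel decryption queue. On any failure the skb is
 * freed here.
 */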
static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
{
	__le32 idx = ((struct message_data *)skb->data)->key_idx;
	struct wg_peer *peer = NULL;
	int ret;

	rcu_read_lock_bh();
	PACKET_CB(skb)->keypair =
		(struct noise_keypair *)wg_index_hashtable_lookup(
			wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
			&peer);
	if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
		goto err_keypair;

	if (unlikely(READ_ONCE(peer->is_dead)))
		goto err;

	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
	if (likely(!ret || ret == -EPIPE)) {
		rcu_read_unlock_bh();
		return;
	}
err:
	wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
err_keypair:
	rcu_read_unlock_bh();
	wg_peer_put(peer);
	dev_kfree_skb(skb);
}

void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
{
	if (unlikely(prepare_skb_header(skb, wg) < 0))
		goto err;
	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
		int cpu, ret = -EBUSY;

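		/* Handshake processing needs a seeded RNG, so drop handshake
		 * packets outright until the RNG is ready.
		 */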
		if (unlikely(!rng_is_initialized()))
			goto drop;
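		/* Once the queue is more than half full, shed load: only
		 * enqueue if the ring's producer lock is immediately
		 * available, rather than having many CPUs pile up on it.
		 */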
		if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
			}
		} else
			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
		if (ret) {
	drop:
			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
						wg->dev->name, skb);
			goto err;
		}
		atomic_inc(&wg->handshake_queue_len);
		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
		/* Queues up a call to wg_packet_handshake_receive_worker(): */
		queue_work_on(cpu, wg->handshake_receive_wq,
			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
		break;
	}
	case cpu_to_le32(MESSAGE_DATA):
		PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		wg_packet_consume_data(wg, skb);
		break;
	default:
		WARN(1, "Non-exhaustive parsing of packet header led to unknown packet type!\n");
		goto err;
	}
	return;

err:
	dev_kfree_skb(skb);
}