1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2022 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/kcov.h>
21 #include <linux/bitops.h>
22 #include <net/mac80211.h>
23 #include <net/ieee80211_radiotap.h>
24 #include <asm/unaligned.h>
25
26 #include "ieee80211_i.h"
27 #include "driver-ops.h"
28 #include "led.h"
29 #include "mesh.h"
30 #include "wep.h"
31 #include "wpa.h"
32 #include "tkip.h"
33 #include "wme.h"
34 #include "rate.h"
35
36 /*
37 * monitor mode reception
38 *
39 * This function cleans up the SKB, i.e. it removes all the stuff
40 * only useful for monitoring.
41 */
42 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
43 unsigned int present_fcs_len,
44 unsigned int rtap_space)
45 {
46 struct ieee80211_hdr *hdr;
47 unsigned int hdrlen;
48 __le16 fc;
49
50 if (present_fcs_len)
51 __pskb_trim(skb, skb->len - present_fcs_len);
52 pskb_pull(skb, rtap_space);
53
54 hdr = (void *)skb->data;
55 fc = hdr->frame_control;
56
57 /*
58 * Remove the HT-Control field (if present) on management
59 * frames after we've sent the frame to monitoring. We
60 * (currently) don't need it, and don't properly parse
61 * frames with it present, due to the assumption of a
62 * fixed management header length.
63 */
64 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
65 return skb;
66
67 hdrlen = ieee80211_hdrlen(fc);
68 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
69
70 if (!pskb_may_pull(skb, hdrlen)) {
71 dev_kfree_skb(skb);
72 return NULL;
73 }
74
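/* strip the 4-byte HT Control field: move the header bytes before it 4 bytes
 * toward the frame body so they overwrite it, then pull the duplicate bytes
 * left at the front */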
75 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
76 hdrlen - IEEE80211_HT_CTL_LEN);
77 pskb_pull(skb, IEEE80211_HT_CTL_LEN);
78
79 return skb;
80 }
81
82 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
83 unsigned int rtap_space)
84 {
85 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
86 struct ieee80211_hdr *hdr;
87
88 hdr = (void *)(skb->data + rtap_space);
89
90 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
91 RX_FLAG_FAILED_PLCP_CRC |
92 RX_FLAG_ONLY_MONITOR |
93 RX_FLAG_NO_PSDU))
94 return true;
95
96 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
97 return true;
98
99 if (ieee80211_is_ctl(hdr->frame_control) &&
100 !ieee80211_is_pspoll(hdr->frame_control) &&
101 !ieee80211_is_back_req(hdr->frame_control))
102 return true;
103
104 return false;
105 }
106
107 static int
108 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
109 struct ieee80211_rx_status *status,
110 struct sk_buff *skb)
111 {
112 int len;
113
114 /* always present fields */
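/* the "+ 8" covers flags (1), rate (1), channel freq + flags (2 + 2) and RX flags (2) */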
115 len = sizeof(struct ieee80211_radiotap_header) + 8;
116
117 /* allocate extra bitmaps */
118 if (status->chains)
119 len += 4 * hweight8(status->chains);
120 /* vendor presence bitmap */
121 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
122 len += 4;
123
124 if (ieee80211_have_rx_timestamp(status)) {
125 len = ALIGN(len, 8);
126 len += 8;
127 }
128 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
129 len += 1;
130
131 /* antenna field, if we don't have per-chain info */
132 if (!status->chains)
133 len += 1;
134
135 /* padding for RX_FLAGS if necessary */
136 len = ALIGN(len, 2);
137
138 if (status->encoding == RX_ENC_HT) /* HT info */
139 len += 3;
140
141 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
142 len = ALIGN(len, 4);
143 len += 8;
144 }
145
146 if (status->encoding == RX_ENC_VHT) {
147 len = ALIGN(len, 2);
148 len += 12;
149 }
150
151 if (local->hw.radiotap_timestamp.units_pos >= 0) {
152 len = ALIGN(len, 8);
153 len += 12;
154 }
155
156 if (status->encoding == RX_ENC_HE &&
157 status->flag & RX_FLAG_RADIOTAP_HE) {
158 len = ALIGN(len, 2);
159 len += 12;
160 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
161 }
162
163 if (status->encoding == RX_ENC_HE &&
164 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
165 len = ALIGN(len, 2);
166 len += 12;
167 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
168 }
169
170 if (status->flag & RX_FLAG_NO_PSDU)
171 len += 1;
172
173 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
174 len = ALIGN(len, 2);
175 len += 4;
176 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
177 }
178
179 if (status->chains) {
180 /* antenna and antenna signal fields */
181 len += 2 * hweight8(status->chains);
182 }
183
184 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
185 struct ieee80211_vendor_radiotap *rtap;
186 int vendor_data_offset = 0;
187
188 /*
189 * The position to look at depends on the existence (or non-
190 * existence) of other elements, so take that into account...
191 */
192 if (status->flag & RX_FLAG_RADIOTAP_HE)
193 vendor_data_offset +=
194 sizeof(struct ieee80211_radiotap_he);
195 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
196 vendor_data_offset +=
197 sizeof(struct ieee80211_radiotap_he_mu);
198 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
199 vendor_data_offset +=
200 sizeof(struct ieee80211_radiotap_lsig);
201
202 rtap = (void *)&skb->data[vendor_data_offset];
203
204 /* alignment for fixed 6-byte vendor data header */
205 len = ALIGN(len, 2);
206 /* vendor data header */
207 len += 6;
208 if (WARN_ON(rtap->align == 0))
209 rtap->align = 1;
210 len = ALIGN(len, rtap->align);
211 len += rtap->len + rtap->pad;
212 }
213
214 return len;
215 }
216
217 static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
218 int link_id,
219 struct sta_info *sta,
220 struct sk_buff *skb)
221 {
222 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
223
224 if (link_id >= 0) {
225 status->link_valid = 1;
226 status->link_id = link_id;
227 } else {
228 status->link_valid = 0;
229 }
230
231 skb_queue_tail(&sdata->skb_queue, skb);
232 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
233 if (sta)
234 sta->deflink.rx_stats.packets++;
235 }
236
237 static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
238 int link_id,
239 struct sta_info *sta,
240 struct sk_buff *skb)
241 {
242 skb->protocol = 0;
243 __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb);
244 }
245
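/*
 * Feed a copy of VHT Group ID Management action frames addressed to the
 * monitor interface's configured MU-MIMO follow address into that
 * interface's RX work queue.
 */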
246 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
247 struct sk_buff *skb,
248 int rtap_space)
249 {
250 struct {
251 struct ieee80211_hdr_3addr hdr;
252 u8 category;
253 u8 action_code;
254 } __packed __aligned(2) action;
255
256 if (!sdata)
257 return;
258
259 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
260
261 if (skb->len < rtap_space + sizeof(action) +
262 VHT_MUMIMO_GROUPS_DATA_LEN)
263 return;
264
265 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
266 return;
267
268 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
269
270 if (!ieee80211_is_action(action.hdr.frame_control))
271 return;
272
273 if (action.category != WLAN_CATEGORY_VHT)
274 return;
275
276 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
277 return;
278
279 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
280 return;
281
282 skb = skb_copy(skb, GFP_ATOMIC);
283 if (!skb)
284 return;
285
286 ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb);
287 }
288
289 /*
290 * ieee80211_add_rx_radiotap_header - add radiotap header
291 *
292 * add a radiotap header containing all the fields which the hardware provided.
293 */
294 static void
295 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
296 struct sk_buff *skb,
297 struct ieee80211_rate *rate,
298 int rtap_len, bool has_fcs)
299 {
300 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
301 struct ieee80211_radiotap_header *rthdr;
302 unsigned char *pos;
303 __le32 *it_present;
304 u32 it_present_val;
305 u16 rx_flags = 0;
306 u16 channel_flags = 0;
307 int mpdulen, chain;
308 unsigned long chains = status->chains;
309 struct ieee80211_vendor_radiotap rtap = {};
310 struct ieee80211_radiotap_he he = {};
311 struct ieee80211_radiotap_he_mu he_mu = {};
312 struct ieee80211_radiotap_lsig lsig = {};
313
314 if (status->flag & RX_FLAG_RADIOTAP_HE) {
315 he = *(struct ieee80211_radiotap_he *)skb->data;
316 skb_pull(skb, sizeof(he));
317 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
318 }
319
320 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
321 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
322 skb_pull(skb, sizeof(he_mu));
323 }
324
325 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
326 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
327 skb_pull(skb, sizeof(lsig));
328 }
329
330 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
331 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
332 /* rtap.len and rtap.pad are undone immediately */
333 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
334 }
335
336 mpdulen = skb->len;
337 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
338 mpdulen += FCS_LEN;
339
340 rthdr = skb_push(skb, rtap_len);
341 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
342 it_present = &rthdr->it_present;
343
344 /* radiotap header, set always present flags */
345 rthdr->it_len = cpu_to_le16(rtap_len);
346 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
347 BIT(IEEE80211_RADIOTAP_CHANNEL) |
348 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
349
350 if (!status->chains)
351 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
352
353 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
354 it_present_val |=
355 BIT(IEEE80211_RADIOTAP_EXT) |
356 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
357 put_unaligned_le32(it_present_val, it_present);
358 it_present++;
359 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
360 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
361 }
362
363 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
364 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
365 BIT(IEEE80211_RADIOTAP_EXT);
366 put_unaligned_le32(it_present_val, it_present);
367 it_present++;
368 it_present_val = rtap.present;
369 }
370
371 put_unaligned_le32(it_present_val, it_present);
372
373 /* This references through an offset into it_optional[] rather
374 * than via it_present otherwise later uses of pos will cause
375 * the compiler to think we have walked past the end of the
376 * struct member.
377 */
378 pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];
379
380 /* the order of the following fields is important */
381
382 /* IEEE80211_RADIOTAP_TSFT */
383 if (ieee80211_have_rx_timestamp(status)) {
384 /* padding */
385 while ((pos - (u8 *)rthdr) & 7)
386 *pos++ = 0;
387 put_unaligned_le64(
388 ieee80211_calculate_rx_timestamp(local, status,
389 mpdulen, 0),
390 pos);
391 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT));
392 pos += 8;
393 }
394
395 /* IEEE80211_RADIOTAP_FLAGS */
396 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
397 *pos |= IEEE80211_RADIOTAP_F_FCS;
398 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
399 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
400 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
401 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
402 pos++;
403
404 /* IEEE80211_RADIOTAP_RATE */
405 if (!rate || status->encoding != RX_ENC_LEGACY) {
406 /*
407 * Without legacy rate information don't add the rate
408 * field; HT/VHT/HE rate information goes into separate
409 * radiotap fields added below. The byte here is still
410 * needed as padding for the channel field, so initialise it to 0.
411 */
412 *pos = 0;
413 } else {
414 int shift = 0;
415 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
416 if (status->bw == RATE_INFO_BW_10)
417 shift = 1;
418 else if (status->bw == RATE_INFO_BW_5)
419 shift = 2;
420 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
421 }
422 pos++;
423
424 /* IEEE80211_RADIOTAP_CHANNEL */
425 /* TODO: frequency offset in KHz */
426 put_unaligned_le16(status->freq, pos);
427 pos += 2;
428 if (status->bw == RATE_INFO_BW_10)
429 channel_flags |= IEEE80211_CHAN_HALF;
430 else if (status->bw == RATE_INFO_BW_5)
431 channel_flags |= IEEE80211_CHAN_QUARTER;
432
433 if (status->band == NL80211_BAND_5GHZ ||
434 status->band == NL80211_BAND_6GHZ)
435 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
436 else if (status->encoding != RX_ENC_LEGACY)
437 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
438 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
439 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
440 else if (rate)
441 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
442 else
443 channel_flags |= IEEE80211_CHAN_2GHZ;
444 put_unaligned_le16(channel_flags, pos);
445 pos += 2;
446
447 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
448 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
449 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
450 *pos = status->signal;
451 rthdr->it_present |=
452 cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL));
453 pos++;
454 }
455
456 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
457
458 if (!status->chains) {
459 /* IEEE80211_RADIOTAP_ANTENNA */
460 *pos = status->antenna;
461 pos++;
462 }
463
464 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
465
466 /* IEEE80211_RADIOTAP_RX_FLAGS */
467 /* ensure 2 byte alignment for the 2 byte field as required */
468 if ((pos - (u8 *)rthdr) & 1)
469 *pos++ = 0;
470 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
471 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
472 put_unaligned_le16(rx_flags, pos);
473 pos += 2;
474
475 if (status->encoding == RX_ENC_HT) {
476 unsigned int stbc;
477
478 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
479 *pos = local->hw.radiotap_mcs_details;
480 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
481 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
482 if (status->enc_flags & RX_ENC_FLAG_LDPC)
483 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC;
484 pos++;
485 *pos = 0;
486 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
487 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
488 if (status->bw == RATE_INFO_BW_40)
489 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
490 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
491 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
492 if (status->enc_flags & RX_ENC_FLAG_LDPC)
493 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
494 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
495 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
496 pos++;
497 *pos++ = status->rate_idx;
498 }
499
500 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
501 u16 flags = 0;
502
503 /* ensure 4 byte alignment */
504 while ((pos - (u8 *)rthdr) & 3)
505 pos++;
506 rthdr->it_present |=
507 cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS));
508 put_unaligned_le32(status->ampdu_reference, pos);
509 pos += 4;
510 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
511 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
512 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
513 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
514 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
515 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
516 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
517 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
518 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
519 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
520 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
521 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
522 put_unaligned_le16(flags, pos);
523 pos += 2;
524 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
525 *pos++ = status->ampdu_delimiter_crc;
526 else
527 *pos++ = 0;
528 *pos++ = 0;
529 }
530
531 if (status->encoding == RX_ENC_VHT) {
532 u16 known = local->hw.radiotap_vht_details;
533
534 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
535 put_unaligned_le16(known, pos);
536 pos += 2;
537 /* flags */
538 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
539 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
540 /* in VHT, STBC is binary */
541 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
542 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
543 if (status->enc_flags & RX_ENC_FLAG_BF)
544 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
545 pos++;
546 /* bandwidth */
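/* radiotap VHT bandwidth codes: 0 = 20 MHz, 1 = 40 MHz, 4 = 80 MHz, 11 = 160 MHz */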
547 switch (status->bw) {
548 case RATE_INFO_BW_80:
549 *pos++ = 4;
550 break;
551 case RATE_INFO_BW_160:
552 *pos++ = 11;
553 break;
554 case RATE_INFO_BW_40:
555 *pos++ = 1;
556 break;
557 default:
558 *pos++ = 0;
559 }
560 /* MCS/NSS */
561 *pos = (status->rate_idx << 4) | status->nss;
562 pos += 4;
563 /* coding field */
564 if (status->enc_flags & RX_ENC_FLAG_LDPC)
565 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
566 pos++;
567 /* group ID */
568 pos++;
569 /* partial_aid */
570 pos += 2;
571 }
572
573 if (local->hw.radiotap_timestamp.units_pos >= 0) {
574 u16 accuracy = 0;
575 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
576
577 rthdr->it_present |=
578 cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP));
579
580 /* ensure 8 byte alignment */
581 while ((pos - (u8 *)rthdr) & 7)
582 pos++;
583
584 put_unaligned_le64(status->device_timestamp, pos);
585 pos += sizeof(u64);
586
587 if (local->hw.radiotap_timestamp.accuracy >= 0) {
588 accuracy = local->hw.radiotap_timestamp.accuracy;
589 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
590 }
591 put_unaligned_le16(accuracy, pos);
592 pos += sizeof(u16);
593
594 *pos++ = local->hw.radiotap_timestamp.units_pos;
595 *pos++ = flags;
596 }
597
598 if (status->encoding == RX_ENC_HE &&
599 status->flag & RX_FLAG_RADIOTAP_HE) {
600 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
601
602 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
603 he.data6 |= HE_PREP(DATA6_NSTS,
604 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
605 status->enc_flags));
606 he.data3 |= HE_PREP(DATA3_STBC, 1);
607 } else {
608 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
609 }
610
611 #define CHECK_GI(s) \
612 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
613 (int)NL80211_RATE_INFO_HE_GI_##s)
614
615 CHECK_GI(0_8);
616 CHECK_GI(1_6);
617 CHECK_GI(3_2);
618
619 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
620 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
621 he.data3 |= HE_PREP(DATA3_CODING,
622 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
623
624 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
625
626 switch (status->bw) {
627 case RATE_INFO_BW_20:
628 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
629 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
630 break;
631 case RATE_INFO_BW_40:
632 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
633 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
634 break;
635 case RATE_INFO_BW_80:
636 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
637 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
638 break;
639 case RATE_INFO_BW_160:
640 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
641 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
642 break;
643 case RATE_INFO_BW_HE_RU:
644 #define CHECK_RU_ALLOC(s) \
645 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
646 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
647
648 CHECK_RU_ALLOC(26);
649 CHECK_RU_ALLOC(52);
650 CHECK_RU_ALLOC(106);
651 CHECK_RU_ALLOC(242);
652 CHECK_RU_ALLOC(484);
653 CHECK_RU_ALLOC(996);
654 CHECK_RU_ALLOC(2x996);
655
656 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
657 status->he_ru + 4);
658 break;
659 default:
660 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
661 }
662
663 /* ensure 2 byte alignment */
664 while ((pos - (u8 *)rthdr) & 1)
665 pos++;
666 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
667 memcpy(pos, &he, sizeof(he));
668 pos += sizeof(he);
669 }
670
671 if (status->encoding == RX_ENC_HE &&
672 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
673 /* ensure 2 byte alignment */
674 while ((pos - (u8 *)rthdr) & 1)
675 pos++;
676 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU));
677 memcpy(pos, &he_mu, sizeof(he_mu));
678 pos += sizeof(he_mu);
679 }
680
681 if (status->flag & RX_FLAG_NO_PSDU) {
682 rthdr->it_present |=
683 cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU));
684 *pos++ = status->zero_length_psdu_type;
685 }
686
687 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
688 /* ensure 2 byte alignment */
689 while ((pos - (u8 *)rthdr) & 1)
690 pos++;
691 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG));
692 memcpy(pos, &lsig, sizeof(lsig));
693 pos += sizeof(lsig);
694 }
695
696 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
697 *pos++ = status->chain_signal[chain];
698 *pos++ = chain;
699 }
700
701 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
702 /* ensure 2 byte alignment for the vendor field as required */
703 if ((pos - (u8 *)rthdr) & 1)
704 *pos++ = 0;
705 *pos++ = rtap.oui[0];
706 *pos++ = rtap.oui[1];
707 *pos++ = rtap.oui[2];
708 *pos++ = rtap.subns;
709 put_unaligned_le16(rtap.len, pos);
710 pos += 2;
711 /* align the actual payload as requested */
712 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
713 *pos++ = 0;
714 /* data (and possible padding) already follows */
715 }
716 }
717
718 static struct sk_buff *
719 ieee80211_make_monitor_skb(struct ieee80211_local *local,
720 struct sk_buff **origskb,
721 struct ieee80211_rate *rate,
722 int rtap_space, bool use_origskb)
723 {
724 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
725 int rt_hdrlen, needed_headroom;
726 struct sk_buff *skb;
727
728 /* room for the radiotap header based on driver features */
729 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
730 needed_headroom = rt_hdrlen - rtap_space;
731
732 if (use_origskb) {
733 /* only need to expand headroom if necessary */
734 skb = *origskb;
735 *origskb = NULL;
736
737 /*
738 * This shouldn't trigger often because most devices have an
739 * RX header they pull before we get here, and that should
740 * be big enough for our radiotap information. We should
741 * probably export the length to drivers so that we can have
742 * them allocate enough headroom to start with.
743 */
744 if (skb_headroom(skb) < needed_headroom &&
745 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
746 dev_kfree_skb(skb);
747 return NULL;
748 }
749 } else {
750 /*
751 * Need to make a copy and possibly remove radiotap header
752 * and FCS from the original.
753 */
754 skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
755 0, GFP_ATOMIC);
756
757 if (!skb)
758 return NULL;
759 }
760
761 /* prepend radiotap information */
762 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
763
764 skb_reset_mac_header(skb);
765 skb->ip_summed = CHECKSUM_UNNECESSARY;
766 skb->pkt_type = PACKET_OTHERHOST;
767 skb->protocol = htons(ETH_P_802_2);
768
769 return skb;
770 }
771
772 /*
773 * This function copies a received frame to all monitor interfaces and
774 * returns a cleaned-up SKB that no longer includes the FCS nor the
775 * radiotap header the driver might have added.
776 */
777 static struct sk_buff *
778 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
779 struct ieee80211_rate *rate)
780 {
781 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
782 struct ieee80211_sub_if_data *sdata;
783 struct sk_buff *monskb = NULL;
784 int present_fcs_len = 0;
785 unsigned int rtap_space = 0;
786 struct ieee80211_sub_if_data *monitor_sdata =
787 rcu_dereference(local->monitor_sdata);
788 bool only_monitor = false;
789 unsigned int min_head_len;
790
791 if (status->flag & RX_FLAG_RADIOTAP_HE)
792 rtap_space += sizeof(struct ieee80211_radiotap_he);
793
794 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
795 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
796
797 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
798 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
799
800 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
801 struct ieee80211_vendor_radiotap *rtap =
802 (void *)(origskb->data + rtap_space);
803
804 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
805 }
806
807 min_head_len = rtap_space;
808
809 /*
810 * First, we may need to make a copy of the skb because
811 * (1) we need to modify it for radiotap (if not present), and
812 * (2) the other RX handlers will modify the skb we got.
813 *
814 * We don't need to, of course, if we aren't going to return
815 * the SKB because it has a bad FCS/PLCP checksum.
816 */
817
818 if (!(status->flag & RX_FLAG_NO_PSDU)) {
819 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
820 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
821 /* driver bug */
822 WARN_ON(1);
823 dev_kfree_skb(origskb);
824 return NULL;
825 }
826 present_fcs_len = FCS_LEN;
827 }
828
829 /* also consider the hdr->frame_control */
830 min_head_len += 2;
831 }
832
833 /* ensure that the expected data elements are in skb head */
834 if (!pskb_may_pull(origskb, min_head_len)) {
835 dev_kfree_skb(origskb);
836 return NULL;
837 }
838
839 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
840
841 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
842 if (only_monitor) {
843 dev_kfree_skb(origskb);
844 return NULL;
845 }
846
847 return ieee80211_clean_skb(origskb, present_fcs_len,
848 rtap_space);
849 }
850
851 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
852
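/*
 * Deliver to every monitor interface: the monitor skb is built once and
 * cloned for all but the last interface, which receives the skb itself
 * (built from origskb directly when the frame is only for monitors).
 */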
853 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
854 bool last_monitor = list_is_last(&sdata->u.mntr.list,
855 &local->mon_list);
856
857 if (!monskb)
858 monskb = ieee80211_make_monitor_skb(local, &origskb,
859 rate, rtap_space,
860 only_monitor &&
861 last_monitor);
862
863 if (monskb) {
864 struct sk_buff *skb;
865
866 if (last_monitor) {
867 skb = monskb;
868 monskb = NULL;
869 } else {
870 skb = skb_clone(monskb, GFP_ATOMIC);
871 }
872
873 if (skb) {
874 skb->dev = sdata->dev;
875 dev_sw_netstats_rx_add(skb->dev, skb->len);
876 netif_receive_skb(skb);
877 }
878 }
879
880 if (last_monitor)
881 break;
882 }
883
884 /* this happens if last_monitor was erroneously false */
885 dev_kfree_skb(monskb);
886
887 /* ditto */
888 if (!origskb)
889 return NULL;
890
891 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
892 }
893
894 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
895 {
896 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
897 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
898 int tid, seqno_idx, security_idx;
899
900 /* does the frame have a qos control field? */
901 if (ieee80211_is_data_qos(hdr->frame_control)) {
902 u8 *qc = ieee80211_get_qos_ctl(hdr);
903 /* frame has qos control */
904 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
905 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
906 status->rx_flags |= IEEE80211_RX_AMSDU;
907
908 seqno_idx = tid;
909 security_idx = tid;
910 } else {
911 /*
912 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
913 *
914 * Sequence numbers for management frames, QoS data
915 * frames with a broadcast/multicast address in the
916 * Address 1 field, and all non-QoS data frames sent
917 * by QoS STAs are assigned using an additional single
918 * modulo-4096 counter, [...]
919 *
920 * We also use that counter for non-QoS STAs.
921 */
922 seqno_idx = IEEE80211_NUM_TIDS;
923 security_idx = 0;
924 if (ieee80211_is_mgmt(hdr->frame_control))
925 security_idx = IEEE80211_NUM_TIDS;
926 tid = 0;
927 }
928
929 rx->seqno_idx = seqno_idx;
930 rx->security_idx = security_idx;
931 /* Set skb->priority to the 802.1D tag if the highest order bit of the TID
932 * is not set. For now, set skb->priority to 0 for other cases. */
933 rx->skb->priority = (tid > 7) ? 0 : tid;
934 }
935
936 /**
937 * DOC: Packet alignment
938 *
939 * Drivers always need to pass packets that are aligned to two-byte boundaries
940 * to the stack.
941 *
942 * Additionally, drivers should, if possible, align the payload data in a way that
943 * guarantees that the contained IP header is aligned to a four-byte
944 * boundary. In the case of regular frames, this simply means aligning the
945 * payload to a four-byte boundary (because either the IP header is directly
946 * contained, or IV/RFC1042 headers that have a length divisible by four are
947 * in front of it). If the payload data is not properly aligned and the
948 * architecture doesn't support efficient unaligned operations, mac80211
949 * will align the data.
950 *
951 * With A-MSDU frames, however, the payload data address must yield two modulo
952 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
953 * push the IP header further back to a multiple of four again. Thankfully, the
954 * specs were sane enough this time around to require padding each A-MSDU
955 * subframe to a length that is a multiple of four.
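 * (For example, a 14-byte subframe header starting at an address equal to
 * 2 modulo 4 ends at a multiple of four, 2 + 14 = 16, so the IP header
 * behind it is four-byte aligned.)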
956 *
957 * Padding like Atheros hardware adds which is between the 802.11 header and
958 * the payload is not supported, the driver is required to move the 802.11
959 * header to be directly in front of the payload in that case.
960 */
961 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
962 {
963 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
964 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
965 #endif
966 }
967
968
969 /* rx handlers */
970
971 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
972 {
973 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
974
975 if (is_multicast_ether_addr(hdr->addr1))
976 return 0;
977
978 return ieee80211_is_robust_mgmt_frame(skb);
979 }
980
981
982 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
983 {
984 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
985
986 if (!is_multicast_ether_addr(hdr->addr1))
987 return 0;
988
989 return ieee80211_is_robust_mgmt_frame(skb);
990 }
991
992
993 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
994 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
995 {
996 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
997 struct ieee80211_mmie *mmie;
998 struct ieee80211_mmie_16 *mmie16;
999
1000 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
1001 return -1;
1002
1003 if (!ieee80211_is_robust_mgmt_frame(skb) &&
1004 !ieee80211_is_beacon(hdr->frame_control))
1005 return -1; /* not a robust management frame */
1006
1007 mmie = (struct ieee80211_mmie *)
1008 (skb->data + skb->len - sizeof(*mmie));
1009 if (mmie->element_id == WLAN_EID_MMIE &&
1010 mmie->length == sizeof(*mmie) - 2)
1011 return le16_to_cpu(mmie->key_id);
1012
1013 mmie16 = (struct ieee80211_mmie_16 *)
1014 (skb->data + skb->len - sizeof(*mmie16));
1015 if (skb->len >= 24 + sizeof(*mmie16) &&
1016 mmie16->element_id == WLAN_EID_MMIE &&
1017 mmie16->length == sizeof(*mmie16) - 2)
1018 return le16_to_cpu(mmie16->key_id);
1019
1020 return -1;
1021 }
1022
1023 static int ieee80211_get_keyid(struct sk_buff *skb)
1024 {
1025 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1026 __le16 fc = hdr->frame_control;
1027 int hdrlen = ieee80211_hdrlen(fc);
1028 u8 keyid;
1029
1030 /* WEP, TKIP, CCMP and GCMP */
1031 if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
1032 return -EINVAL;
1033
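/* for WEP, TKIP, CCMP and GCMP the key ID lives in bits 6-7 of the 4th IV octet */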
1034 skb_copy_bits(skb, hdrlen + 3, &keyid, 1);
1035
1036 keyid >>= 6;
1037
1038 return keyid;
1039 }
1040
1041 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1042 {
1043 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1044 char *dev_addr = rx->sdata->vif.addr;
1045
1046 if (ieee80211_is_data(hdr->frame_control)) {
1047 if (is_multicast_ether_addr(hdr->addr1)) {
1048 if (ieee80211_has_tods(hdr->frame_control) ||
1049 !ieee80211_has_fromds(hdr->frame_control))
1050 return RX_DROP_MONITOR;
1051 if (ether_addr_equal(hdr->addr3, dev_addr))
1052 return RX_DROP_MONITOR;
1053 } else {
1054 if (!ieee80211_has_a4(hdr->frame_control))
1055 return RX_DROP_MONITOR;
1056 if (ether_addr_equal(hdr->addr4, dev_addr))
1057 return RX_DROP_MONITOR;
1058 }
1059 }
1060
1061 /* If there is no established peer link and this is not a peer link
1062 * establishment frame, beacon or probe, drop the frame.
1063 */
1064
1065 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1066 struct ieee80211_mgmt *mgmt;
1067
1068 if (!ieee80211_is_mgmt(hdr->frame_control))
1069 return RX_DROP_MONITOR;
1070
1071 if (ieee80211_is_action(hdr->frame_control)) {
1072 u8 category;
1073
1074 /* make sure category field is present */
1075 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1076 return RX_DROP_MONITOR;
1077
1078 mgmt = (struct ieee80211_mgmt *)hdr;
1079 category = mgmt->u.action.category;
1080 if (category != WLAN_CATEGORY_MESH_ACTION &&
1081 category != WLAN_CATEGORY_SELF_PROTECTED)
1082 return RX_DROP_MONITOR;
1083 return RX_CONTINUE;
1084 }
1085
1086 if (ieee80211_is_probe_req(hdr->frame_control) ||
1087 ieee80211_is_probe_resp(hdr->frame_control) ||
1088 ieee80211_is_beacon(hdr->frame_control) ||
1089 ieee80211_is_auth(hdr->frame_control))
1090 return RX_CONTINUE;
1091
1092 return RX_DROP_MONITOR;
1093 }
1094
1095 return RX_CONTINUE;
1096 }
1097
1098 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1099 int index)
1100 {
1101 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1102 struct sk_buff *tail = skb_peek_tail(frames);
1103 struct ieee80211_rx_status *status;
1104
1105 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1106 return true;
1107
1108 if (!tail)
1109 return false;
1110
1111 status = IEEE80211_SKB_RXCB(tail);
1112 if (status->flag & RX_FLAG_AMSDU_MORE)
1113 return false;
1114
1115 return true;
1116 }
1117
1118 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1119 struct tid_ampdu_rx *tid_agg_rx,
1120 int index,
1121 struct sk_buff_head *frames)
1122 {
1123 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1124 struct sk_buff *skb;
1125 struct ieee80211_rx_status *status;
1126
1127 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1128
1129 if (skb_queue_empty(skb_list))
1130 goto no_frame;
1131
1132 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1133 __skb_queue_purge(skb_list);
1134 goto no_frame;
1135 }
1136
1137 /* release frames from the reorder ring buffer */
1138 tid_agg_rx->stored_mpdu_num--;
1139 while ((skb = __skb_dequeue(skb_list))) {
1140 status = IEEE80211_SKB_RXCB(skb);
1141 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1142 __skb_queue_tail(frames, skb);
1143 }
1144
1145 no_frame:
1146 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1147 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1148 }
1149
1150 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1151 struct tid_ampdu_rx *tid_agg_rx,
1152 u16 head_seq_num,
1153 struct sk_buff_head *frames)
1154 {
1155 int index;
1156
1157 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1158
1159 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1160 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1161 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1162 frames);
1163 }
1164 }
1165
1166 /*
1167 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1168 * the skb was added to the buffer longer than this time ago, the earlier
1169 * frames that have not yet been received are assumed to be lost and the skb
1170 * can be released for processing. This may also release other skb's from the
1171 * reorder buffer if there are no additional gaps between the frames.
1172 *
1173 * Callers must hold tid_agg_rx->reorder_lock.
1174 */
1175 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1176
1177 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1178 struct tid_ampdu_rx *tid_agg_rx,
1179 struct sk_buff_head *frames)
1180 {
1181 int index, i, j;
1182
1183 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1184
1185 /* release the buffer until next missing frame */
1186 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1187 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1188 tid_agg_rx->stored_mpdu_num) {
1189 /*
1190 * No buffers ready to be released, but check whether any
1191 * frames in the reorder buffer have timed out.
1192 */
1193 int skipped = 1;
1194 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1195 j = (j + 1) % tid_agg_rx->buf_size) {
1196 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1197 skipped++;
1198 continue;
1199 }
1200 if (skipped &&
1201 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1202 HT_RX_REORDER_BUF_TIMEOUT))
1203 goto set_release_timer;
1204
1205 /* don't leave incomplete A-MSDUs around */
1206 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1207 i = (i + 1) % tid_agg_rx->buf_size)
1208 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1209
1210 ht_dbg_ratelimited(sdata,
1211 "release an RX reorder frame due to timeout on earlier frames\n");
1212 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1213 frames);
1214
1215 /*
1216 * Increment the head seq# also for the skipped slots.
1217 */
1218 tid_agg_rx->head_seq_num =
1219 (tid_agg_rx->head_seq_num +
1220 skipped) & IEEE80211_SN_MASK;
1221 skipped = 0;
1222 }
1223 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1224 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1225 frames);
1226 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1227 }
1228
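/*
 * If frames are still buffered, find the earliest occupied slot and
 * (re)arm the release timer for it; otherwise the timer is no longer needed.
 */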
1229 if (tid_agg_rx->stored_mpdu_num) {
1230 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1231
1232 for (; j != (index - 1) % tid_agg_rx->buf_size;
1233 j = (j + 1) % tid_agg_rx->buf_size) {
1234 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1235 break;
1236 }
1237
1238 set_release_timer:
1239
1240 if (!tid_agg_rx->removed)
1241 mod_timer(&tid_agg_rx->reorder_timer,
1242 tid_agg_rx->reorder_time[j] + 1 +
1243 HT_RX_REORDER_BUF_TIMEOUT);
1244 } else {
1245 del_timer(&tid_agg_rx->reorder_timer);
1246 }
1247 }
1248
1249 /*
1250 * As this function belongs to the RX path it must be under
1251 * rcu_read_lock protection. It returns false if the frame
1252 * can be processed immediately, true if it was consumed.
1253 */
1254 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1255 struct tid_ampdu_rx *tid_agg_rx,
1256 struct sk_buff *skb,
1257 struct sk_buff_head *frames)
1258 {
1259 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1260 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1261 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1262 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1263 u16 head_seq_num, buf_size;
1264 int index;
1265 bool ret = true;
1266
1267 spin_lock(&tid_agg_rx->reorder_lock);
1268
1269 /*
1270 * Offloaded BA sessions have no known starting sequence number, so pick
1271 * one from the first Rx'ed frame for this TID after the BA session was started.
1272 */
1273 if (unlikely(tid_agg_rx->auto_seq)) {
1274 tid_agg_rx->auto_seq = false;
1275 tid_agg_rx->ssn = mpdu_seq_num;
1276 tid_agg_rx->head_seq_num = mpdu_seq_num;
1277 }
1278
1279 buf_size = tid_agg_rx->buf_size;
1280 head_seq_num = tid_agg_rx->head_seq_num;
1281
1282 /*
1283 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1284 * be reordered.
1285 */
1286 if (unlikely(!tid_agg_rx->started)) {
1287 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1288 ret = false;
1289 goto out;
1290 }
1291 tid_agg_rx->started = true;
1292 }
1293
1294 /* frame with out of date sequence number */
1295 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1296 dev_kfree_skb(skb);
1297 goto out;
1298 }
1299
1300 /*
1301 * If the frame's sequence number exceeds our buffering window
1302 * size, release some previous frames to make room for this one.
1303 */
1304 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1305 head_seq_num = ieee80211_sn_inc(
1306 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1307 /* release stored frames up to new head to stack */
1308 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1309 head_seq_num, frames);
1310 }
1311
1312 /* Now the new frame is always in the range of the reordering buffer */
1313
1314 index = mpdu_seq_num % tid_agg_rx->buf_size;
1315
1316 /* check if we already stored this frame */
1317 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1318 dev_kfree_skb(skb);
1319 goto out;
1320 }
1321
1322 /*
1323 * If the current MPDU is in the right order and nothing else
1324 * is stored we can process it directly, no need to buffer it.
1325 * If it is first but there's something stored, we may be able
1326 * to release frames after this one.
1327 */
1328 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1329 tid_agg_rx->stored_mpdu_num == 0) {
1330 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1331 tid_agg_rx->head_seq_num =
1332 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1333 ret = false;
1334 goto out;
1335 }
1336
1337 /* put the frame in the reordering buffer */
1338 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1339 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1340 tid_agg_rx->reorder_time[index] = jiffies;
1341 tid_agg_rx->stored_mpdu_num++;
1342 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1343 }
1344
1345 out:
1346 spin_unlock(&tid_agg_rx->reorder_lock);
1347 return ret;
1348 }
1349
1350 /*
1351 * Reorder MPDUs from A-MPDUs, keeping them in the reorder buffer.
1352 * MPDUs that can be processed immediately are appended to the frames queue.
1353 */
1354 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1355 struct sk_buff_head *frames)
1356 {
1357 struct sk_buff *skb = rx->skb;
1358 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1359 struct sta_info *sta = rx->sta;
1360 struct tid_ampdu_rx *tid_agg_rx;
1361 u16 sc;
1362 u8 tid, ack_policy;
1363
1364 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1365 is_multicast_ether_addr(hdr->addr1))
1366 goto dont_reorder;
1367
1368 /*
1369 * filter the QoS data rx stream according to
1370 * STA/TID and check if this STA/TID is on aggregation
1371 */
1372
1373 if (!sta)
1374 goto dont_reorder;
1375
1376 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1377 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1378 tid = ieee80211_get_tid(hdr);
1379
1380 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1381 if (!tid_agg_rx) {
1382 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1383 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1384 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1385 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1386 WLAN_BACK_RECIPIENT,
1387 WLAN_REASON_QSTA_REQUIRE_SETUP);
1388 goto dont_reorder;
1389 }
1390
1391 /* qos null data frames are excluded */
1392 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1393 goto dont_reorder;
1394
1395 /* not part of a BA session */
1396 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
1397 goto dont_reorder;
1398
1399 /* new, potentially un-ordered, ampdu frame - process it */
1400
1401 /* reset session timer */
1402 if (tid_agg_rx->timeout)
1403 tid_agg_rx->last_rx = jiffies;
1404
1405 /* if this mpdu is fragmented - terminate rx aggregation session */
1406 sc = le16_to_cpu(hdr->seq_ctrl);
1407 if (sc & IEEE80211_SCTL_FRAG) {
1408 ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb);
1409 return;
1410 }
1411
1412 /*
1413 * No locking needed -- we will only ever process one
1414 * RX packet at a time, and thus own tid_agg_rx. All
1415 * other code manipulating it needs to (and does) make
1416 * sure that we cannot get to it any more before doing
1417 * anything with it.
1418 */
1419 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1420 frames))
1421 return;
1422
1423 dont_reorder:
1424 __skb_queue_tail(frames, skb);
1425 }
1426
1427 static ieee80211_rx_result debug_noinline
1428 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1429 {
1430 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1431 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1432
1433 if (status->flag & RX_FLAG_DUP_VALIDATED)
1434 return RX_CONTINUE;
1435
1436 /*
1437 * Drop duplicate 802.11 retransmissions
1438 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1439 */
1440
1441 if (rx->skb->len < 24)
1442 return RX_CONTINUE;
1443
1444 if (ieee80211_is_ctl(hdr->frame_control) ||
1445 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1446 is_multicast_ether_addr(hdr->addr1))
1447 return RX_CONTINUE;
1448
1449 if (!rx->sta)
1450 return RX_CONTINUE;
1451
1452 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1453 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1454 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1455 rx->link_sta->rx_stats.num_duplicates++;
1456 return RX_DROP_UNUSABLE;
1457 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1458 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1459 }
1460
1461 return RX_CONTINUE;
1462 }
1463
1464 static ieee80211_rx_result debug_noinline
1465 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1466 {
1467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1468
1469 /* Drop disallowed frame classes based on STA auth/assoc state;
1470 * IEEE 802.11, Chap 5.5.
1471 *
1472 * mac80211 filters only based on association state, i.e. it drops
1473 * Class 3 frames from not associated stations. hostapd sends
1474 * deauth/disassoc frames when needed. In addition, hostapd is
1475 * responsible for filtering on both auth and assoc states.
1476 */
1477
1478 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1479 return ieee80211_rx_mesh_check(rx);
1480
1481 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1482 ieee80211_is_pspoll(hdr->frame_control)) &&
1483 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1484 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1485 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1486 /*
1487 * accept port control frames from the AP even when it's not
1488 * yet marked ASSOC to prevent a race where we don't set the
1489 * assoc bit quickly enough before it sends the first frame
1490 */
1491 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1492 ieee80211_is_data_present(hdr->frame_control)) {
1493 unsigned int hdrlen;
1494 __be16 ethertype;
1495
1496 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1497
1498 if (rx->skb->len < hdrlen + 8)
1499 return RX_DROP_MONITOR;
1500
1501 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1502 if (ethertype == rx->sdata->control_port_protocol)
1503 return RX_CONTINUE;
1504 }
1505
1506 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1507 cfg80211_rx_spurious_frame(rx->sdata->dev,
1508 hdr->addr2,
1509 GFP_ATOMIC))
1510 return RX_DROP_UNUSABLE;
1511
1512 return RX_DROP_MONITOR;
1513 }
1514
1515 return RX_CONTINUE;
1516 }
1517
1518
1519 static ieee80211_rx_result debug_noinline
1520 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1521 {
1522 struct ieee80211_local *local;
1523 struct ieee80211_hdr *hdr;
1524 struct sk_buff *skb;
1525
1526 local = rx->local;
1527 skb = rx->skb;
1528 hdr = (struct ieee80211_hdr *) skb->data;
1529
1530 if (!local->pspolling)
1531 return RX_CONTINUE;
1532
1533 if (!ieee80211_has_fromds(hdr->frame_control))
1534 /* this is not from AP */
1535 return RX_CONTINUE;
1536
1537 if (!ieee80211_is_data(hdr->frame_control))
1538 return RX_CONTINUE;
1539
1540 if (!ieee80211_has_moredata(hdr->frame_control)) {
1541 /* AP has no more frames buffered for us */
1542 local->pspolling = false;
1543 return RX_CONTINUE;
1544 }
1545
1546 /* more data bit is set, let's request a new frame from the AP */
1547 ieee80211_send_pspoll(local, rx->sdata);
1548
1549 return RX_CONTINUE;
1550 }
1551
1552 static void sta_ps_start(struct sta_info *sta)
1553 {
1554 struct ieee80211_sub_if_data *sdata = sta->sdata;
1555 struct ieee80211_local *local = sdata->local;
1556 struct ps_data *ps;
1557 int tid;
1558
1559 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1560 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1561 ps = &sdata->bss->ps;
1562 else
1563 return;
1564
1565 atomic_inc(&ps->num_sta_ps);
1566 set_sta_flag(sta, WLAN_STA_PS_STA);
1567 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1568 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1569 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1570 sta->sta.addr, sta->sta.aid);
1571
1572 ieee80211_clear_fast_xmit(sta);
1573
1574 if (!sta->sta.txq[0])
1575 return;
1576
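/*
 * Remove this STA's txqs from the airtime scheduler and record which
 * TIDs still have frames queued in the driver, so they can be released
 * when the STA wakes up again.
 */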
1577 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1578 struct ieee80211_txq *txq = sta->sta.txq[tid];
1579 struct txq_info *txqi = to_txq_info(txq);
1580
1581 spin_lock(&local->active_txq_lock[txq->ac]);
1582 if (!list_empty(&txqi->schedule_order))
1583 list_del_init(&txqi->schedule_order);
1584 spin_unlock(&local->active_txq_lock[txq->ac]);
1585
1586 if (txq_has_queue(txq))
1587 set_bit(tid, &sta->txq_buffered_tids);
1588 else
1589 clear_bit(tid, &sta->txq_buffered_tids);
1590 }
1591 }
1592
1593 static void sta_ps_end(struct sta_info *sta)
1594 {
1595 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1596 sta->sta.addr, sta->sta.aid);
1597
1598 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1599 /*
1600 * Clear the flag only if the other one is still set
1601 * so that the TX path won't start TX'ing new frames
1602 * directly ... In the case that the driver flag isn't
1603 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1604 */
1605 clear_sta_flag(sta, WLAN_STA_PS_STA);
1606 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1607 sta->sta.addr, sta->sta.aid);
1608 return;
1609 }
1610
1611 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1612 clear_sta_flag(sta, WLAN_STA_PS_STA);
1613 ieee80211_sta_ps_deliver_wakeup(sta);
1614 }
1615
1616 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1617 {
1618 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1619 bool in_ps;
1620
1621 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1622
1623 /* Don't let the same PS state be set twice */
1624 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1625 if ((start && in_ps) || (!start && !in_ps))
1626 return -EINVAL;
1627
1628 if (start)
1629 sta_ps_start(sta);
1630 else
1631 sta_ps_end(sta);
1632
1633 return 0;
1634 }
1635 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1636
1637 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1638 {
1639 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1640
1641 if (test_sta_flag(sta, WLAN_STA_SP))
1642 return;
1643
1644 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1645 ieee80211_sta_ps_deliver_poll_response(sta);
1646 else
1647 set_sta_flag(sta, WLAN_STA_PSPOLL);
1648 }
1649 EXPORT_SYMBOL(ieee80211_sta_pspoll);
1650
1651 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1652 {
1653 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1654 int ac = ieee80211_ac_from_tid(tid);
1655
1656 /*
1657 * If this AC is not trigger-enabled do nothing unless the
1658 * driver is calling us after it already checked.
1659 *
1660 * NB: This could/should check a separate bitmap of trigger-
1661 * enabled queues, but for now we only implement uAPSD w/o
1662 * TSPEC changes to the ACs, so they're always the same.
1663 */
1664 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1665 tid != IEEE80211_NUM_TIDS)
1666 return;
1667
1668 /* if we are in a service period, do nothing */
1669 if (test_sta_flag(sta, WLAN_STA_SP))
1670 return;
1671
1672 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1673 ieee80211_sta_ps_deliver_uapsd(sta);
1674 else
1675 set_sta_flag(sta, WLAN_STA_UAPSD);
1676 }
1677 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1678
1679 static ieee80211_rx_result debug_noinline
1680 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1681 {
1682 struct ieee80211_sub_if_data *sdata = rx->sdata;
1683 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1684 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1685
1686 if (!rx->sta)
1687 return RX_CONTINUE;
1688
1689 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1690 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1691 return RX_CONTINUE;
1692
1693 /*
1694 * The device handles station powersave, so don't do anything about
1695 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
1696 * it to mac80211 since they're handled there).
1697 */
1698 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1699 return RX_CONTINUE;
1700
1701 /*
1702 * Don't do anything if the station isn't already asleep. In
1703 * the uAPSD case, the station will probably be marked asleep,
1704 * in the PS-Poll case the station must be confused ...
1705 */
1706 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1707 return RX_CONTINUE;
1708
1709 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1710 ieee80211_sta_pspoll(&rx->sta->sta);
1711
1712 /* Free the PS-Poll skb here instead of returning RX_DROP, which
1713 * would count as a dropped frame. */
1714 dev_kfree_skb(rx->skb);
1715
1716 return RX_QUEUED;
1717 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1718 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1719 ieee80211_has_pm(hdr->frame_control) &&
1720 (ieee80211_is_data_qos(hdr->frame_control) ||
1721 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1722 u8 tid = ieee80211_get_tid(hdr);
1723
1724 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1725 }
1726
1727 return RX_CONTINUE;
1728 }
1729
1730 static ieee80211_rx_result debug_noinline
1731 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1732 {
1733 struct sta_info *sta = rx->sta;
1734 struct link_sta_info *link_sta = rx->link_sta;
1735 struct sk_buff *skb = rx->skb;
1736 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1737 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1738 int i;
1739
1740 if (!sta || !link_sta)
1741 return RX_CONTINUE;
1742
1743 /*
1744 * Update last_rx only for IBSS packets which are for the current
1745 * BSSID and for station already AUTHORIZED to avoid keeping the
1746 * current IBSS network alive in cases where other STAs start
1747 * using different BSSID. This will also give the station another
1748 * chance to restart the authentication/authorization in case
1749 * something went wrong the first time.
1750 */
1751 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1752 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1753 NL80211_IFTYPE_ADHOC);
1754 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1755 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1756 link_sta->rx_stats.last_rx = jiffies;
1757 if (ieee80211_is_data(hdr->frame_control) &&
1758 !is_multicast_ether_addr(hdr->addr1))
1759 link_sta->rx_stats.last_rate =
1760 sta_stats_encode_rate(status);
1761 }
1762 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1763 link_sta->rx_stats.last_rx = jiffies;
1764 } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1765 !is_multicast_ether_addr(hdr->addr1)) {
1766 /*
1767 * Mesh beacons will update last_rx when they are found to
1768 * match the current local configuration when processed.
1769 */
1770 link_sta->rx_stats.last_rx = jiffies;
1771 if (ieee80211_is_data(hdr->frame_control))
1772 link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1773 }
1774
1775 link_sta->rx_stats.fragments++;
1776
1777 u64_stats_update_begin(&link_sta->rx_stats.syncp);
1778 link_sta->rx_stats.bytes += rx->skb->len;
1779 u64_stats_update_end(&link_sta->rx_stats.syncp);
1780
1781 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1782 link_sta->rx_stats.last_signal = status->signal;
1783 ewma_signal_add(&link_sta->rx_stats_avg.signal,
1784 -status->signal);
1785 }
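/*
 * The running signal averages (here and for the per-chain values below)
 * store the negated dBm value (-signal), so a stronger signal becomes a
 * smaller stored number; the average is expected to be negated again
 * wherever it is reported.
 */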
1786
1787 if (status->chains) {
1788 link_sta->rx_stats.chains = status->chains;
1789 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1790 int signal = status->chain_signal[i];
1791
1792 if (!(status->chains & BIT(i)))
1793 continue;
1794
1795 link_sta->rx_stats.chain_signal_last[i] = signal;
1796 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
1797 -signal);
1798 }
1799 }
1800
1801 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1802 return RX_CONTINUE;
1803
1804 /*
1805 * Change STA power saving mode only at the end of a frame
1806 * exchange sequence, and only for a data or management
1807 * frame as specified in IEEE 802.11-2016 11.2.3.2
1808 */
1809 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1810 !ieee80211_has_morefrags(hdr->frame_control) &&
1811 !is_multicast_ether_addr(hdr->addr1) &&
1812 (ieee80211_is_mgmt(hdr->frame_control) ||
1813 ieee80211_is_data(hdr->frame_control)) &&
1814 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1815 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1816 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1817 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1818 if (!ieee80211_has_pm(hdr->frame_control))
1819 sta_ps_end(sta);
1820 } else {
1821 if (ieee80211_has_pm(hdr->frame_control))
1822 sta_ps_start(sta);
1823 }
1824 }
1825
1826 /* mesh power save support */
1827 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1828 ieee80211_mps_rx_h_sta_process(sta, hdr);
1829
1830 /*
1831 * Drop (qos-)data::nullfunc frames silently, since they
1832 * are used only to control station power saving mode.
1833 */
1834 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1835 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1836
1837 /*
1838 * If we receive a 4-addr nullfunc frame from a STA
1839 * that has not been moved to a 4-addr STA VLAN yet, send
1840 * the event to userspace and, for older hostapd, drop
1841 * the frame to the monitor interface.
1842 */
1843 if (ieee80211_has_a4(hdr->frame_control) &&
1844 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1845 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1846 !rx->sdata->u.vlan.sta))) {
1847 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1848 cfg80211_rx_unexpected_4addr_frame(
1849 rx->sdata->dev, sta->sta.addr,
1850 GFP_ATOMIC);
1851 return RX_DROP_MONITOR;
1852 }
1853 /*
1854 * Update counter and free packet here to avoid
1855 * counting this as a dropped packet.
1856 */
1857 link_sta->rx_stats.packets++;
1858 dev_kfree_skb(rx->skb);
1859 return RX_QUEUED;
1860 }
1861
1862 return RX_CONTINUE;
1863 } /* ieee80211_rx_h_sta_process */
1864
1865 static struct ieee80211_key *
1866 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1867 {
1868 struct ieee80211_key *key = NULL;
1869 int idx2;
1870
1871 /* Make sure key gets set if either BIGTK key index is set so that
1872 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1873 * Beacon frames and Beacon frames that claim to use another BIGTK key
1874 * index (i.e., a key that we do not have).
1875 */
1876
1877 if (idx < 0) {
1878 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1879 idx2 = idx + 1;
1880 } else {
1881 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1882 idx2 = idx + 1;
1883 else
1884 idx2 = idx - 1;
1885 }
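/*
 * At this point idx and idx2 cover both possible BIGTK slots
 * (NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS and the slot after it),
 * so the lookups below also try the index that was not requested.
 */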
1886
1887 if (rx->link_sta)
1888 key = rcu_dereference(rx->link_sta->gtk[idx]);
1889 if (!key)
1890 key = rcu_dereference(rx->link->gtk[idx]);
1891 if (!key && rx->link_sta)
1892 key = rcu_dereference(rx->link_sta->gtk[idx2]);
1893 if (!key)
1894 key = rcu_dereference(rx->link->gtk[idx2]);
1895
1896 return key;
1897 }
1898
1899 static ieee80211_rx_result debug_noinline
1900 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1901 {
1902 struct sk_buff *skb = rx->skb;
1903 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1904 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1905 int keyidx;
1906 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1907 struct ieee80211_key *sta_ptk = NULL;
1908 struct ieee80211_key *ptk_idx = NULL;
1909 int mmie_keyidx = -1;
1910 __le16 fc;
1911
1912 if (ieee80211_is_ext(hdr->frame_control))
1913 return RX_CONTINUE;
1914
1915 /*
1916 * Key selection 101
1917 *
1918 * There are five types of keys:
1919 * - GTK (group keys)
1920 * - IGTK (group keys for management frames)
1921 * - BIGTK (group keys for Beacon frames)
1922 * - PTK (pairwise keys)
1923 * - STK (station-to-station pairwise keys)
1924 *
1925 * When selecting a key, we have to distinguish between multicast
1926 * (including broadcast) and unicast frames, the latter can only
1927 * use PTKs and STKs while the former always use GTKs, IGTKs, and
1928 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
1929 * then unicast frames can also use key indices like GTKs. Hence, if we
1930 * don't have a PTK/STK we check the key index for a WEP key.
1931 *
1932 * Note that in a regular BSS, multicast frames are sent by the
1933 * AP only; associated stations unicast the frame to the AP first,
1934 * which then multicasts it on their behalf.
1935 *
1936 * There is also a slight problem in IBSS mode: GTKs are negotiated
1937 * with each station, that is something we don't currently handle.
1938 * The spec seems to expect that one negotiates the same key with
1939 * every station but there's no such requirement; VLANs could be
1940 * possible.
1941 */
1942
1943 /* start without a key */
1944 rx->key = NULL;
1945 fc = hdr->frame_control;
1946
1947 if (rx->sta) {
1948 int keyid = rx->sta->ptk_idx;
1949 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1950
1951 if (ieee80211_has_protected(fc) &&
1952 !(status->flag & RX_FLAG_IV_STRIPPED)) {
1953 keyid = ieee80211_get_keyid(rx->skb);
1954
1955 if (unlikely(keyid < 0))
1956 return RX_DROP_UNUSABLE;
1957
1958 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1959 }
1960 }
1961
1962 if (!ieee80211_has_protected(fc))
1963 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1964
1965 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1966 rx->key = ptk_idx ? ptk_idx : sta_ptk;
1967 if ((status->flag & RX_FLAG_DECRYPTED) &&
1968 (status->flag & RX_FLAG_IV_STRIPPED))
1969 return RX_CONTINUE;
1970 /* Skip decryption if the frame is not protected. */
1971 if (!ieee80211_has_protected(fc))
1972 return RX_CONTINUE;
1973 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
1974 /* Broadcast/multicast robust management frame / BIP */
1975 if ((status->flag & RX_FLAG_DECRYPTED) &&
1976 (status->flag & RX_FLAG_IV_STRIPPED))
1977 return RX_CONTINUE;
1978
1979 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
1980 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
1981 NUM_DEFAULT_BEACON_KEYS) {
1982 if (rx->sdata->dev)
1983 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
1984 skb->data,
1985 skb->len);
1986 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1987 }
1988
1989 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
1990 if (!rx->key)
1991 return RX_CONTINUE; /* Beacon protection not in use */
1992 } else if (mmie_keyidx >= 0) {
1993 /* Broadcast/multicast robust management frame / BIP */
1994 if ((status->flag & RX_FLAG_DECRYPTED) &&
1995 (status->flag & RX_FLAG_IV_STRIPPED))
1996 return RX_CONTINUE;
1997
1998 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1999 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
2000 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
2001 if (rx->link_sta) {
2002 if (ieee80211_is_group_privacy_action(skb) &&
2003 test_sta_flag(rx->sta, WLAN_STA_MFP))
2004 return RX_DROP_MONITOR;
2005
2006 rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]);
2007 }
2008 if (!rx->key)
2009 rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]);
2010 } else if (!ieee80211_has_protected(fc)) {
2011 /*
2012 * The frame was not protected, so skip decryption. However, we
2013 * need to set rx->key if there is a key that could have been
2014 * used so that the frame may be dropped if encryption would
2015 * have been expected.
2016 */
2017 struct ieee80211_key *key = NULL;
2018 int i;
2019
2020 if (ieee80211_is_beacon(fc)) {
2021 key = ieee80211_rx_get_bigtk(rx, -1);
2022 } else if (ieee80211_is_mgmt(fc) &&
2023 is_multicast_ether_addr(hdr->addr1)) {
2024 key = rcu_dereference(rx->link->default_mgmt_key);
2025 } else {
2026 if (rx->link_sta) {
2027 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2028 key = rcu_dereference(rx->link_sta->gtk[i]);
2029 if (key)
2030 break;
2031 }
2032 }
2033 if (!key) {
2034 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2035 key = rcu_dereference(rx->link->gtk[i]);
2036 if (key)
2037 break;
2038 }
2039 }
2040 }
2041 if (key)
2042 rx->key = key;
2043 return RX_CONTINUE;
2044 } else {
2045 /*
2046 * The device doesn't give us the IV so we won't be
2047 * able to look up the key. That's ok though, we
2048 * don't need to decrypt the frame, we just won't
2049 * be able to keep statistics accurate.
2050 * Except for key threshold notifications, should
2051 * we somehow allow the driver to tell us which key
2052 * the hardware used if this flag is set?
2053 */
2054 if ((status->flag & RX_FLAG_DECRYPTED) &&
2055 (status->flag & RX_FLAG_IV_STRIPPED))
2056 return RX_CONTINUE;
2057
2058 keyidx = ieee80211_get_keyid(rx->skb);
2059
2060 if (unlikely(keyidx < 0))
2061 return RX_DROP_UNUSABLE;
2062
2063 /* check per-station GTK first, if multicast packet */
2064 if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta)
2065 rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]);
2066
2067 /* if not found, try default key */
2068 if (!rx->key) {
2069 if (is_multicast_ether_addr(hdr->addr1))
2070 rx->key = rcu_dereference(rx->link->gtk[keyidx]);
2071 if (!rx->key)
2072 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2073
2074 /*
2075 * RSNA-protected unicast frames should always be
2076 * sent with pairwise or station-to-station keys,
2077 * but for WEP we allow using a key index as well.
2078 */
2079 if (rx->key &&
2080 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2081 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2082 !is_multicast_ether_addr(hdr->addr1))
2083 rx->key = NULL;
2084 }
2085 }
2086
2087 if (rx->key) {
2088 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2089 return RX_DROP_MONITOR;
2090
2091 /* TODO: add threshold stuff again */
2092 } else {
2093 return RX_DROP_MONITOR;
2094 }
2095
2096 switch (rx->key->conf.cipher) {
2097 case WLAN_CIPHER_SUITE_WEP40:
2098 case WLAN_CIPHER_SUITE_WEP104:
2099 result = ieee80211_crypto_wep_decrypt(rx);
2100 break;
2101 case WLAN_CIPHER_SUITE_TKIP:
2102 result = ieee80211_crypto_tkip_decrypt(rx);
2103 break;
2104 case WLAN_CIPHER_SUITE_CCMP:
2105 result = ieee80211_crypto_ccmp_decrypt(
2106 rx, IEEE80211_CCMP_MIC_LEN);
2107 break;
2108 case WLAN_CIPHER_SUITE_CCMP_256:
2109 result = ieee80211_crypto_ccmp_decrypt(
2110 rx, IEEE80211_CCMP_256_MIC_LEN);
2111 break;
2112 case WLAN_CIPHER_SUITE_AES_CMAC:
2113 result = ieee80211_crypto_aes_cmac_decrypt(rx);
2114 break;
2115 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2116 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2117 break;
2118 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2119 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2120 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2121 break;
2122 case WLAN_CIPHER_SUITE_GCMP:
2123 case WLAN_CIPHER_SUITE_GCMP_256:
2124 result = ieee80211_crypto_gcmp_decrypt(rx);
2125 break;
2126 default:
2127 result = RX_DROP_UNUSABLE;
2128 }
2129
2130 /* the hdr variable is invalid after the decrypt handlers */
2131
2132 /* either the frame has been decrypted or will be dropped */
2133 status->flag |= RX_FLAG_DECRYPTED;
2134
2135 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE &&
2136 rx->sdata->dev))
2137 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2138 skb->data, skb->len);
2139
2140 return result;
2141 }
2142
2143 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
2144 {
2145 int i;
2146
2147 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2148 skb_queue_head_init(&cache->entries[i].skb_list);
2149 }
2150
2151 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
2152 {
2153 int i;
2154
2155 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2156 __skb_queue_purge(&cache->entries[i].skb_list);
2157 }
2158
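/*
 * Take the next slot in the fragment cache ring buffer (evicting any
 * partially reassembled frame still stored there) and start a new
 * reassembly with this first fragment.
 */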
2159 static inline struct ieee80211_fragment_entry *
2160 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
2161 unsigned int frag, unsigned int seq, int rx_queue,
2162 struct sk_buff **skb)
2163 {
2164 struct ieee80211_fragment_entry *entry;
2165
2166 entry = &cache->entries[cache->next++];
2167 if (cache->next >= IEEE80211_FRAGMENT_MAX)
2168 cache->next = 0;
2169
2170 __skb_queue_purge(&entry->skb_list);
2171
2172 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2173 *skb = NULL;
2174 entry->first_frag_time = jiffies;
2175 entry->seq = seq;
2176 entry->rx_queue = rx_queue;
2177 entry->last_frag = frag;
2178 entry->check_sequential_pn = false;
2179 entry->extra_len = 0;
2180
2181 return entry;
2182 }
2183
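/*
 * Find the pending reassembly entry this fragment belongs to: same
 * sequence number and RX queue, the expected next fragment number,
 * matching frame type and addresses, and no older than two seconds.
 */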
2184 static inline struct ieee80211_fragment_entry *
2185 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
2186 unsigned int frag, unsigned int seq,
2187 int rx_queue, struct ieee80211_hdr *hdr)
2188 {
2189 struct ieee80211_fragment_entry *entry;
2190 int i, idx;
2191
2192 idx = cache->next;
2193 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2194 struct ieee80211_hdr *f_hdr;
2195 struct sk_buff *f_skb;
2196
2197 idx--;
2198 if (idx < 0)
2199 idx = IEEE80211_FRAGMENT_MAX - 1;
2200
2201 entry = &cache->entries[idx];
2202 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2203 entry->rx_queue != rx_queue ||
2204 entry->last_frag + 1 != frag)
2205 continue;
2206
2207 f_skb = __skb_peek(&entry->skb_list);
2208 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2209
2210 /*
2211 * Check that the frame type and addresses match, else try the next entry
2212 */
2213 if (((hdr->frame_control ^ f_hdr->frame_control) &
2214 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2215 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2216 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2217 continue;
2218
2219 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2220 __skb_queue_purge(&entry->skb_list);
2221 continue;
2222 }
2223 return entry;
2224 }
2225
2226 return NULL;
2227 }
2228
2229 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
2230 {
2231 return rx->key &&
2232 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2233 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2234 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2235 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2236 ieee80211_has_protected(fc);
2237 }
2238
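/*
 * Defragmentation handler: the first fragment opens a cache entry
 * (recording the CCMP/GCMP PN when the cipher requires sequential PNs),
 * subsequent fragments are appended after key/PN sanity checks, and the
 * reassembled frame continues down the RX path once the final fragment
 * (no "more fragments" bit) has arrived.
 */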
2239 static ieee80211_rx_result debug_noinline
2240 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2241 {
2242 struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2243 struct ieee80211_hdr *hdr;
2244 u16 sc;
2245 __le16 fc;
2246 unsigned int frag, seq;
2247 struct ieee80211_fragment_entry *entry;
2248 struct sk_buff *skb;
2249 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2250
2251 hdr = (struct ieee80211_hdr *)rx->skb->data;
2252 fc = hdr->frame_control;
2253
2254 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2255 return RX_CONTINUE;
2256
2257 sc = le16_to_cpu(hdr->seq_ctrl);
2258 frag = sc & IEEE80211_SCTL_FRAG;
2259
2260 if (rx->sta)
2261 cache = &rx->sta->frags;
2262
2263 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2264 goto out;
2265
2266 if (is_multicast_ether_addr(hdr->addr1))
2267 return RX_DROP_MONITOR;
2268
2269 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2270
2271 if (skb_linearize(rx->skb))
2272 return RX_DROP_UNUSABLE;
2273
2274 /*
2275 * skb_linearize() might change the skb->data and
2276 * previously cached variables (in this case, hdr) need to
2277 * be refreshed with the new data.
2278 */
2279 hdr = (struct ieee80211_hdr *)rx->skb->data;
2280 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2281
2282 if (frag == 0) {
2283 /* This is the first fragment of a new frame. */
2284 entry = ieee80211_reassemble_add(cache, frag, seq,
2285 rx->seqno_idx, &(rx->skb));
2286 if (requires_sequential_pn(rx, fc)) {
2287 int queue = rx->security_idx;
2288
2289 /* Store CCMP/GCMP PN so that we can verify that the
2290 * next fragment has a sequential PN value.
2291 */
2292 entry->check_sequential_pn = true;
2293 entry->is_protected = true;
2294 entry->key_color = rx->key->color;
2295 memcpy(entry->last_pn,
2296 rx->key->u.ccmp.rx_pn[queue],
2297 IEEE80211_CCMP_PN_LEN);
2298 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2299 u.ccmp.rx_pn) !=
2300 offsetof(struct ieee80211_key,
2301 u.gcmp.rx_pn));
2302 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2303 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2304 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2305 IEEE80211_GCMP_PN_LEN);
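/*
 * The assertions above make sure the CCMP and GCMP PN state share the
 * same layout and size, so the CCMP fields can be used for both ciphers
 * here.
 */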
2306 } else if (rx->key &&
2307 (ieee80211_has_protected(fc) ||
2308 (status->flag & RX_FLAG_DECRYPTED))) {
2309 entry->is_protected = true;
2310 entry->key_color = rx->key->color;
2311 }
2312 return RX_QUEUED;
2313 }
2314
2315 /* This is a fragment of a frame that should already be pending in the
2316 * fragment cache. Add this fragment to the end of the pending entry.
2317 */
2318 entry = ieee80211_reassemble_find(cache, frag, seq,
2319 rx->seqno_idx, hdr);
2320 if (!entry) {
2321 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2322 return RX_DROP_MONITOR;
2323 }
2324
2325 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2326 * MPDU PN values are not incrementing in steps of 1."
2327 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2328 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2329 */
2330 if (entry->check_sequential_pn) {
2331 int i;
2332 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2333
2334 if (!requires_sequential_pn(rx, fc))
2335 return RX_DROP_UNUSABLE;
2336
2337 /* Prevent mixed key and fragment cache attacks */
2338 if (entry->key_color != rx->key->color)
2339 return RX_DROP_UNUSABLE;
2340
2341 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2342 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2343 pn[i]++;
2344 if (pn[i])
2345 break;
2346 }
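/*
 * The loop above adds one to the big-endian PN byte array, carrying into
 * the next byte on wrap-around; e.g. 00:00:00:00:01:ff becomes
 * 00:00:00:00:02:00. The received PN must then match this expected value
 * exactly.
 */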
2347
2348 rpn = rx->ccm_gcm.pn;
2349 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2350 return RX_DROP_UNUSABLE;
2351 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2352 } else if (entry->is_protected &&
2353 (!rx->key ||
2354 (!ieee80211_has_protected(fc) &&
2355 !(status->flag & RX_FLAG_DECRYPTED)) ||
2356 rx->key->color != entry->key_color)) {
2357 /* Drop this as a mixed key or fragment cache attack, even
2358 * though for TKIP the Michael MIC should protect us, and WEP
2359 * is a lost cause anyway.
2360 */
2361 return RX_DROP_UNUSABLE;
2362 } else if (entry->is_protected && rx->key &&
2363 entry->key_color != rx->key->color &&
2364 (status->flag & RX_FLAG_DECRYPTED)) {
2365 return RX_DROP_UNUSABLE;
2366 }
2367
2368 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2369 __skb_queue_tail(&entry->skb_list, rx->skb);
2370 entry->last_frag = frag;
2371 entry->extra_len += rx->skb->len;
2372 if (ieee80211_has_morefrags(fc)) {
2373 rx->skb = NULL;
2374 return RX_QUEUED;
2375 }
2376
2377 rx->skb = __skb_dequeue(&entry->skb_list);
2378 if (skb_tailroom(rx->skb) < entry->extra_len) {
2379 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2380 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2381 GFP_ATOMIC))) {
2382 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2383 __skb_queue_purge(&entry->skb_list);
2384 return RX_DROP_UNUSABLE;
2385 }
2386 }
2387 while ((skb = __skb_dequeue(&entry->skb_list))) {
2388 skb_put_data(rx->skb, skb->data, skb->len);
2389 dev_kfree_skb(skb);
2390 }
2391
2392 out:
2393 ieee80211_led_rx(rx->local);
2394 if (rx->sta)
2395 rx->link_sta->rx_stats.packets++;
2396 return RX_CONTINUE;
2397 }
2398
2399 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2400 {
2401 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2402 return -EACCES;
2403
2404 return 0;
2405 }
2406
2407 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2408 {
2409 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2410 struct sk_buff *skb = rx->skb;
2411 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2412
2413 /*
2414 * Pass through unencrypted frames if the hardware has
2415 * decrypted them already.
2416 */
2417 if (status->flag & RX_FLAG_DECRYPTED)
2418 return 0;
2419
2420 /* check mesh EAPOL frames first */
2421 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2422 ieee80211_is_data(fc))) {
2423 struct ieee80211s_hdr *mesh_hdr;
2424 u16 hdr_len = ieee80211_hdrlen(fc);
2425 u16 ethertype_offset;
2426 __be16 ethertype;
2427
2428 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2429 goto drop_check;
2430
2431 /* make sure fixed part of mesh header is there, also checks skb len */
2432 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2433 goto drop_check;
2434
2435 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2436 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2437 sizeof(rfc1042_header);
2438
2439 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2440 ethertype == rx->sdata->control_port_protocol)
2441 return 0;
2442 }
2443
2444 drop_check:
2445 /* Drop unencrypted frames if key is set. */
2446 if (unlikely(!ieee80211_has_protected(fc) &&
2447 !ieee80211_is_any_nullfunc(fc) &&
2448 ieee80211_is_data(fc) && rx->key))
2449 return -EACCES;
2450
2451 return 0;
2452 }
2453
2454 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2455 {
2456 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2457 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2458 __le16 fc = hdr->frame_control;
2459
2460 /*
2461 * Pass through unencrypted frames if the hardware has
2462 * decrypted them already.
2463 */
2464 if (status->flag & RX_FLAG_DECRYPTED)
2465 return 0;
2466
2467 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2468 if (unlikely(!ieee80211_has_protected(fc) &&
2469 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2470 rx->key)) {
2471 if (ieee80211_is_deauth(fc) ||
2472 ieee80211_is_disassoc(fc))
2473 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2474 rx->skb->data,
2475 rx->skb->len);
2476 return -EACCES;
2477 }
2478 /* BIP does not use Protected field, so need to check MMIE */
2479 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2480 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2481 if (ieee80211_is_deauth(fc) ||
2482 ieee80211_is_disassoc(fc))
2483 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2484 rx->skb->data,
2485 rx->skb->len);
2486 return -EACCES;
2487 }
2488 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2489 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2490 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2491 rx->skb->data,
2492 rx->skb->len);
2493 return -EACCES;
2494 }
2495 /*
2496 * When using MFP, Action frames are not allowed prior to
2497 * having configured keys.
2498 */
2499 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2500 ieee80211_is_robust_mgmt_frame(rx->skb)))
2501 return -EACCES;
2502 }
2503
2504 return 0;
2505 }
2506
2507 static int
2508 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2509 {
2510 struct ieee80211_sub_if_data *sdata = rx->sdata;
2511 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2512 bool check_port_control = false;
2513 struct ethhdr *ehdr;
2514 int ret;
2515
2516 *port_control = false;
2517 if (ieee80211_has_a4(hdr->frame_control) &&
2518 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2519 return -1;
2520
2521 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2522 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2523
2524 if (!sdata->u.mgd.use_4addr)
2525 return -1;
2526 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2527 check_port_control = true;
2528 }
2529
2530 if (is_multicast_ether_addr(hdr->addr1) &&
2531 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2532 return -1;
2533
2534 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2535 if (ret < 0)
2536 return ret;
2537
2538 ehdr = (struct ethhdr *) rx->skb->data;
2539 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2540 *port_control = true;
2541 else if (check_port_control)
2542 return -1;
2543
2544 return 0;
2545 }
2546
2547 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata,
2548 const u8 *addr, int *out_link_id)
2549 {
2550 unsigned int link_id;
2551
2552 /* non-MLO, or MLD address replaced by hardware */
2553 if (ether_addr_equal(sdata->vif.addr, addr))
2554 return true;
2555
2556 if (!sdata->vif.valid_links)
2557 return false;
2558
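/* otherwise compare against each valid link address of the MLD */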
2559 for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) {
2560 struct ieee80211_bss_conf *conf;
2561
2562 conf = rcu_dereference(sdata->vif.link_conf[link_id]);
2563
2564 if (!conf)
2565 continue;
2566 if (ether_addr_equal(conf->addr, addr)) {
2567 if (out_link_id)
2568 *out_link_id = link_id;
2569 return true;
2570 }
2571 }
2572
2573 return false;
2574 }
2575
2576 /*
2577 * requires that rx->skb is a frame with ethernet header
2578 */
2579 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2580 {
2581 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2582 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2583 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2584
2585 /*
2586 * Allow EAPOL frames to us/the PAE group address regardless of
2587 * whether the frame was encrypted or not, and always disallow
2588 * all other destination addresses for them.
2589 */
2590 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
2591 return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) ||
2592 ether_addr_equal(ehdr->h_dest, pae_group_addr);
2593
2594 if (ieee80211_802_1x_port_control(rx) ||
2595 ieee80211_drop_unencrypted(rx, fc))
2596 return false;
2597
2598 return true;
2599 }
2600
2601 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2602 struct ieee80211_rx_data *rx)
2603 {
2604 struct ieee80211_sub_if_data *sdata = rx->sdata;
2605 struct net_device *dev = sdata->dev;
2606
2607 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2608 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2609 !sdata->control_port_no_preauth)) &&
2610 sdata->control_port_over_nl80211)) {
2611 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2612 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2613
2614 cfg80211_rx_control_port(dev, skb, noencrypt);
2615 dev_kfree_skb(skb);
2616 } else {
2617 struct ethhdr *ehdr = (void *)skb_mac_header(skb);
2618
2619 memset(skb->cb, 0, sizeof(skb->cb));
2620
2621 /*
2622 * 802.1X over 802.11 requires that the authenticator address
2623 * be used for EAPOL frames. However, 802.1X allows the use of
2624 * the PAE group address instead. If the interface is part of
2625 * a bridge and we pass the frame with the PAE group address,
2626 * then the bridge will forward it to the network (even if the
2627 * client was not associated yet), which isn't supposed to
2628 * happen.
2629 * To avoid that, rewrite the destination address to our own
2630 * address, so that the authenticator (e.g. hostapd) will see
2631 * the frame, but bridge won't forward it anywhere else. Note
2632 * that due to earlier filtering, the only other address can
2633 * be the PAE group address, unless the hardware allowed them
2634 * through in 802.3 offloaded mode.
2635 */
2636 if (unlikely(skb->protocol == sdata->control_port_protocol &&
2637 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
2638 ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
2639
2640 /* deliver to local stack */
2641 if (rx->list)
2642 list_add_tail(&skb->list, rx->list);
2643 else
2644 netif_receive_skb(skb);
2645 }
2646 }
2647
2648 /*
2649 * requires that rx->skb is a frame with ethernet header
2650 */
2651 static void
2652 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2653 {
2654 struct ieee80211_sub_if_data *sdata = rx->sdata;
2655 struct net_device *dev = sdata->dev;
2656 struct sk_buff *skb, *xmit_skb;
2657 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2658 struct sta_info *dsta;
2659
2660 skb = rx->skb;
2661 xmit_skb = NULL;
2662
2663 dev_sw_netstats_rx_add(dev, skb->len);
2664
2665 if (rx->sta) {
2666 /* The seqno index has the same property as needed
2667 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2668 * for non-QoS-data frames. Here we know it's a data
2669 * frame, so count MSDUs.
2670 */
2671 u64_stats_update_begin(&rx->link_sta->rx_stats.syncp);
2672 rx->link_sta->rx_stats.msdu[rx->seqno_idx]++;
2673 u64_stats_update_end(&rx->link_sta->rx_stats.syncp);
2674 }
2675
2676 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2677 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2678 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2679 ehdr->h_proto != rx->sdata->control_port_protocol &&
2680 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2681 if (is_multicast_ether_addr(ehdr->h_dest) &&
2682 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2683 /*
2684 * send multicast frames both to higher layers in
2685 * local net stack and back to the wireless medium
2686 */
2687 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2688 if (!xmit_skb)
2689 net_info_ratelimited("%s: failed to clone multicast frame\n",
2690 dev->name);
2691 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2692 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2693 dsta = sta_info_get(sdata, ehdr->h_dest);
2694 if (dsta) {
2695 /*
2696 * The destination station is associated to
2697 * this AP (in this VLAN), so send the frame
2698 * directly to it and do not pass it to local
2699 * net stack.
2700 */
2701 xmit_skb = skb;
2702 skb = NULL;
2703 }
2704 }
2705 }
2706
2707 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2708 if (skb) {
2709 /* 'align' will only take the values 0 or 2 here since all
2710 * frames are required to be aligned to 2-byte boundaries
2711 * when being passed to mac80211; the code here works just
2712 * as well if that isn't true, but mac80211 assumes it can
2713 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2714 */
2715 int align;
2716
2717 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2718 if (align) {
2719 if (WARN_ON(skb_headroom(skb) < 3)) {
2720 dev_kfree_skb(skb);
2721 skb = NULL;
2722 } else {
2723 u8 *data = skb->data;
2724 size_t len = skb_headlen(skb);
2725 skb->data -= align;
2726 memmove(skb->data, data, len);
2727 skb_set_tail_pointer(skb, len);
2728 }
2729 }
2730 }
2731 #endif
2732
2733 if (skb) {
2734 skb->protocol = eth_type_trans(skb, dev);
2735 ieee80211_deliver_skb_to_local_stack(skb, rx);
2736 }
2737
2738 if (xmit_skb) {
2739 /*
2740 * Send to wireless media and increase priority by 256 to
2741 * keep the received priority instead of reclassifying
2742 * the frame (see cfg80211_classify8021d).
2743 */
2744 xmit_skb->priority += 256;
2745 xmit_skb->protocol = htons(ETH_P_802_3);
2746 skb_reset_network_header(xmit_skb);
2747 skb_reset_mac_header(xmit_skb);
2748 dev_queue_xmit(xmit_skb);
2749 }
2750 }
2751
2752 static ieee80211_rx_result debug_noinline
2753 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2754 {
2755 struct net_device *dev = rx->sdata->dev;
2756 struct sk_buff *skb = rx->skb;
2757 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2758 __le16 fc = hdr->frame_control;
2759 struct sk_buff_head frame_list;
2760 struct ethhdr ethhdr;
2761 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2762
2763 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2764 check_da = NULL;
2765 check_sa = NULL;
2766 } else switch (rx->sdata->vif.type) {
2767 case NL80211_IFTYPE_AP:
2768 case NL80211_IFTYPE_AP_VLAN:
2769 check_da = NULL;
2770 break;
2771 case NL80211_IFTYPE_STATION:
2772 if (!rx->sta ||
2773 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2774 check_sa = NULL;
2775 break;
2776 case NL80211_IFTYPE_MESH_POINT:
2777 check_sa = NULL;
2778 break;
2779 default:
2780 break;
2781 }
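/*
 * check_da/check_sa tell the A-MSDU parser which inner addresses must
 * match the outer 802.11 header: 4-address frames skip both checks, AP
 * interfaces accept any inner DA, and non-TDLS stations as well as mesh
 * interfaces accept any inner SA.
 */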
2782
2783 skb->dev = dev;
2784 __skb_queue_head_init(&frame_list);
2785
2786 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2787 rx->sdata->vif.addr,
2788 rx->sdata->vif.type,
2789 data_offset, true))
2790 return RX_DROP_UNUSABLE;
2791
2792 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2793 rx->sdata->vif.type,
2794 rx->local->hw.extra_tx_headroom,
2795 check_da, check_sa);
2796
2797 while (!skb_queue_empty(&frame_list)) {
2798 rx->skb = __skb_dequeue(&frame_list);
2799
2800 if (!ieee80211_frame_allowed(rx, fc)) {
2801 dev_kfree_skb(rx->skb);
2802 continue;
2803 }
2804
2805 ieee80211_deliver_skb(rx);
2806 }
2807
2808 return RX_QUEUED;
2809 }
2810
2811 static ieee80211_rx_result debug_noinline
2812 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2813 {
2814 struct sk_buff *skb = rx->skb;
2815 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2817 __le16 fc = hdr->frame_control;
2818
2819 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2820 return RX_CONTINUE;
2821
2822 if (unlikely(!ieee80211_is_data(fc)))
2823 return RX_CONTINUE;
2824
2825 if (unlikely(!ieee80211_is_data_present(fc)))
2826 return RX_DROP_MONITOR;
2827
2828 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2829 switch (rx->sdata->vif.type) {
2830 case NL80211_IFTYPE_AP_VLAN:
2831 if (!rx->sdata->u.vlan.sta)
2832 return RX_DROP_UNUSABLE;
2833 break;
2834 case NL80211_IFTYPE_STATION:
2835 if (!rx->sdata->u.mgd.use_4addr)
2836 return RX_DROP_UNUSABLE;
2837 break;
2838 default:
2839 return RX_DROP_UNUSABLE;
2840 }
2841 }
2842
2843 if (is_multicast_ether_addr(hdr->addr1))
2844 return RX_DROP_UNUSABLE;
2845
2846 if (rx->key) {
2847 /*
2848 * We should not receive A-MSDUs on pre-HT connections,
2849 * and HT connections cannot use old ciphers. Thus drop
2850 * them, as in those cases we couldn't even have SPP
2851 * A-MSDUs or such.
2852 */
2853 switch (rx->key->conf.cipher) {
2854 case WLAN_CIPHER_SUITE_WEP40:
2855 case WLAN_CIPHER_SUITE_WEP104:
2856 case WLAN_CIPHER_SUITE_TKIP:
2857 return RX_DROP_UNUSABLE;
2858 default:
2859 break;
2860 }
2861 }
2862
2863 return __ieee80211_rx_h_amsdu(rx, 0);
2864 }
2865
2866 #ifdef CONFIG_MAC80211_MESH
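/*
 * Mesh forwarding: validate the mesh header, learn proxied addresses
 * into the MPP table, and re-transmit frames that are not destined to
 * this interface, subject to the mesh TTL, queue state and the
 * dot11MeshForwarding setting.
 */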
2867 static ieee80211_rx_result
2868 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2869 {
2870 struct ieee80211_hdr *fwd_hdr, *hdr;
2871 struct ieee80211_tx_info *info;
2872 struct ieee80211s_hdr *mesh_hdr;
2873 struct sk_buff *skb = rx->skb, *fwd_skb;
2874 struct ieee80211_local *local = rx->local;
2875 struct ieee80211_sub_if_data *sdata = rx->sdata;
2876 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2877 u16 ac, q, hdrlen;
2878 int tailroom = 0;
2879
2880 hdr = (struct ieee80211_hdr *) skb->data;
2881 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2882
2883 /* make sure fixed part of mesh header is there, also checks skb len */
2884 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2885 return RX_DROP_MONITOR;
2886
2887 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2888
2889 /* make sure full mesh header is there, also checks skb len */
2890 if (!pskb_may_pull(rx->skb,
2891 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2892 return RX_DROP_MONITOR;
2893
2894 /* reload pointers */
2895 hdr = (struct ieee80211_hdr *) skb->data;
2896 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2897
2898 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2899 return RX_DROP_MONITOR;
2900
2901 /* frame is in the RMC (recent multicast cache), don't forward */
2902 if (ieee80211_is_data(hdr->frame_control) &&
2903 is_multicast_ether_addr(hdr->addr1) &&
2904 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2905 return RX_DROP_MONITOR;
2906
2907 if (!ieee80211_is_data(hdr->frame_control))
2908 return RX_CONTINUE;
2909
2910 if (!mesh_hdr->ttl)
2911 return RX_DROP_MONITOR;
2912
2913 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2914 struct mesh_path *mppath;
2915 char *proxied_addr;
2916 char *mpp_addr;
2917
2918 if (is_multicast_ether_addr(hdr->addr1)) {
2919 mpp_addr = hdr->addr3;
2920 proxied_addr = mesh_hdr->eaddr1;
2921 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2922 MESH_FLAGS_AE_A5_A6) {
2923 /* has_a4 already checked in ieee80211_rx_mesh_check */
2924 mpp_addr = hdr->addr4;
2925 proxied_addr = mesh_hdr->eaddr2;
2926 } else {
2927 return RX_DROP_MONITOR;
2928 }
2929
2930 rcu_read_lock();
2931 mppath = mpp_path_lookup(sdata, proxied_addr);
2932 if (!mppath) {
2933 mpp_path_add(sdata, proxied_addr, mpp_addr);
2934 } else {
2935 spin_lock_bh(&mppath->state_lock);
2936 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2937 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2938 mppath->exp_time = jiffies;
2939 spin_unlock_bh(&mppath->state_lock);
2940 }
2941 rcu_read_unlock();
2942 }
2943
2944 /* Frame has reached destination. Don't forward */
2945 if (!is_multicast_ether_addr(hdr->addr1) &&
2946 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2947 return RX_CONTINUE;
2948
2949 ac = ieee802_1d_to_ac[skb->priority];
2950 q = sdata->vif.hw_queue[ac];
2951 if (ieee80211_queue_stopped(&local->hw, q)) {
2952 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2953 return RX_DROP_MONITOR;
2954 }
2955 skb_set_queue_mapping(skb, ac);
2956
2957 if (!--mesh_hdr->ttl) {
2958 if (!is_multicast_ether_addr(hdr->addr1))
2959 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2960 dropped_frames_ttl);
2961 goto out;
2962 }
2963
2964 if (!ifmsh->mshcfg.dot11MeshForwarding)
2965 goto out;
2966
2967 if (sdata->crypto_tx_tailroom_needed_cnt)
2968 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2969
2970 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2971 IEEE80211_ENCRYPT_HEADROOM,
2972 tailroom, GFP_ATOMIC);
2973 if (!fwd_skb)
2974 goto out;
2975
2976 fwd_skb->dev = sdata->dev;
2977 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2978 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2979 info = IEEE80211_SKB_CB(fwd_skb);
2980 memset(info, 0, sizeof(*info));
2981 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
2982 info->control.vif = &rx->sdata->vif;
2983 info->control.jiffies = jiffies;
2984 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2985 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2986 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2987 /* update power mode indication when forwarding */
2988 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2989 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2990 /* mesh power mode flags updated in mesh_nexthop_lookup */
2991 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2992 } else {
2993 /* unable to resolve next hop */
2994 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2995 fwd_hdr->addr3, 0,
2996 WLAN_REASON_MESH_PATH_NOFORWARD,
2997 fwd_hdr->addr2);
2998 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2999 kfree_skb(fwd_skb);
3000 return RX_DROP_MONITOR;
3001 }
3002
3003 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
3004 ieee80211_add_pending_skb(local, fwd_skb);
3005 out:
3006 if (is_multicast_ether_addr(hdr->addr1))
3007 return RX_CONTINUE;
3008 return RX_DROP_MONITOR;
3009 }
3010 #endif
3011
3012 static ieee80211_rx_result debug_noinline
3013 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
3014 {
3015 struct ieee80211_sub_if_data *sdata = rx->sdata;
3016 struct ieee80211_local *local = rx->local;
3017 struct net_device *dev = sdata->dev;
3018 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
3019 __le16 fc = hdr->frame_control;
3020 bool port_control;
3021 int err;
3022
3023 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
3024 return RX_CONTINUE;
3025
3026 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3027 return RX_DROP_MONITOR;
3028
3029 /*
3030 * Send unexpected-4addr-frame event to hostapd. For older versions,
3031 * also drop the frame to cooked monitor interfaces.
3032 */
3033 if (ieee80211_has_a4(hdr->frame_control) &&
3034 sdata->vif.type == NL80211_IFTYPE_AP) {
3035 if (rx->sta &&
3036 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
3037 cfg80211_rx_unexpected_4addr_frame(
3038 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
3039 return RX_DROP_MONITOR;
3040 }
3041
3042 err = __ieee80211_data_to_8023(rx, &port_control);
3043 if (unlikely(err))
3044 return RX_DROP_UNUSABLE;
3045
3046 if (!ieee80211_frame_allowed(rx, fc))
3047 return RX_DROP_MONITOR;
3048
3049 /* directly handle TDLS channel switch requests/responses */
3050 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
3051 cpu_to_be16(ETH_P_TDLS))) {
3052 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
3053
3054 if (pskb_may_pull(rx->skb,
3055 offsetof(struct ieee80211_tdls_data, u)) &&
3056 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
3057 tf->category == WLAN_CATEGORY_TDLS &&
3058 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
3059 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
3060 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS);
3061 __ieee80211_queue_skb_to_iface(sdata, rx->link_id,
3062 rx->sta, rx->skb);
3063 return RX_QUEUED;
3064 }
3065 }
3066
3067 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3068 unlikely(port_control) && sdata->bss) {
3069 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
3070 u.ap);
3071 dev = sdata->dev;
3072 rx->sdata = sdata;
3073 }
3074
3075 rx->skb->dev = dev;
3076
3077 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
3078 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
3079 !is_multicast_ether_addr(
3080 ((struct ethhdr *)rx->skb->data)->h_dest) &&
3081 (!local->scanning &&
3082 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
3083 mod_timer(&local->dynamic_ps_timer, jiffies +
3084 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
3085
3086 ieee80211_deliver_skb(rx);
3087
3088 return RX_QUEUED;
3089 }
3090
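/*
 * Control frame handler: only BlockAckReq frames are consumed here,
 * releasing buffered frames in the reorder window up to the new start
 * sequence number; any other control frame is dropped to cooked monitor.
 */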
3091 static ieee80211_rx_result debug_noinline
3092 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
3093 {
3094 struct sk_buff *skb = rx->skb;
3095 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
3096 struct tid_ampdu_rx *tid_agg_rx;
3097 u16 start_seq_num;
3098 u16 tid;
3099
3100 if (likely(!ieee80211_is_ctl(bar->frame_control)))
3101 return RX_CONTINUE;
3102
3103 if (ieee80211_is_back_req(bar->frame_control)) {
3104 struct {
3105 __le16 control, start_seq_num;
3106 } __packed bar_data;
3107 struct ieee80211_event event = {
3108 .type = BAR_RX_EVENT,
3109 };
3110
3111 if (!rx->sta)
3112 return RX_DROP_MONITOR;
3113
3114 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
3115 &bar_data, sizeof(bar_data)))
3116 return RX_DROP_MONITOR;
3117
3118 tid = le16_to_cpu(bar_data.control) >> 12;
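/* the TID occupies the top four bits of the BAR control field */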
3119
3120 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
3121 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
3122 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
3123 WLAN_BACK_RECIPIENT,
3124 WLAN_REASON_QSTA_REQUIRE_SETUP);
3125
3126 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
3127 if (!tid_agg_rx)
3128 return RX_DROP_MONITOR;
3129
3130 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3131 event.u.ba.tid = tid;
3132 event.u.ba.ssn = start_seq_num;
3133 event.u.ba.sta = &rx->sta->sta;
3134
3135 /* reset session timer */
3136 if (tid_agg_rx->timeout)
3137 mod_timer(&tid_agg_rx->session_timer,
3138 TU_TO_EXP_TIME(tid_agg_rx->timeout));
3139
3140 spin_lock(&tid_agg_rx->reorder_lock);
3141 /* release stored frames up to start of BAR */
3142 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3143 start_seq_num, frames);
3144 spin_unlock(&tid_agg_rx->reorder_lock);
3145
3146 drv_event_callback(rx->local, rx->sdata, &event);
3147
3148 kfree_skb(skb);
3149 return RX_QUEUED;
3150 }
3151
3152 /*
3153 * After this point, we only want management frames,
3154 * so we can drop all remaining control frames to
3155 * cooked monitor interfaces.
3156 */
3157 return RX_DROP_MONITOR;
3158 }
3159
3160 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3161 struct ieee80211_mgmt *mgmt,
3162 size_t len)
3163 {
3164 struct ieee80211_local *local = sdata->local;
3165 struct sk_buff *skb;
3166 struct ieee80211_mgmt *resp;
3167
3168 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3169 /* Not addressed to our own unicast address */
3170 return;
3171 }
3172
3173 if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) ||
3174 !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
3175 /* Not from the current AP or not associated yet. */
3176 return;
3177 }
3178
3179 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3180 /* Too short SA Query request frame */
3181 return;
3182 }
3183
3184 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3185 if (skb == NULL)
3186 return;
3187
3188 skb_reserve(skb, local->hw.extra_tx_headroom);
3189 resp = skb_put_zero(skb, 24);
3190 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3191 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3192 memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
3193 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3194 IEEE80211_STYPE_ACTION);
3195 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3196 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3197 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3198 memcpy(resp->u.action.u.sa_query.trans_id,
3199 mgmt->u.action.u.sa_query.trans_id,
3200 WLAN_SA_QUERY_TR_ID_LEN);
3201
3202 ieee80211_tx_skb(sdata, skb);
3203 }
3204
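/*
 * Software BSS color collision detection: if a received OBSS beacon
 * advertises the same HE BSS color that this BSS is using (and the
 * hardware does not detect collisions itself), notify the collision
 * handling code.
 */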
3205 static void
3206 ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
3207 {
3208 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3209 const struct element *ie;
3210 size_t baselen;
3211
3212 if (!wiphy_ext_feature_isset(rx->local->hw.wiphy,
3213 NL80211_EXT_FEATURE_BSS_COLOR))
3214 return;
3215
3216 if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
3217 return;
3218
3219 if (rx->sdata->vif.bss_conf.csa_active)
3220 return;
3221
3222 baselen = mgmt->u.beacon.variable - rx->skb->data;
3223 if (baselen > rx->skb->len)
3224 return;
3225
3226 ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
3227 mgmt->u.beacon.variable,
3228 rx->skb->len - baselen);
3229 if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
3230 ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
3231 struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf;
3232 const struct ieee80211_he_operation *he_oper;
3233 u8 color;
3234
3235 he_oper = (void *)(ie->data + 1);
3236 if (le32_get_bits(he_oper->he_oper_params,
3237 IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
3238 return;
3239
3240 color = le32_get_bits(he_oper->he_oper_params,
3241 IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
3242 if (color == bss_conf->he_bss_color.color)
3243 ieeee80211_obss_color_collision_notify(&rx->sdata->vif,
3244 BIT_ULL(color),
3245 GFP_ATOMIC);
3246 }
3247 }
3248
3249 static ieee80211_rx_result debug_noinline
3250 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3251 {
3252 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3253 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3254
3255 if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3256 return RX_CONTINUE;
3257
3258 /*
3259 * From here on, look only at management frames.
3260 * Data and control frames are already handled,
3261 * and unknown (reserved) frames are useless.
3262 */
3263 if (rx->skb->len < 24)
3264 return RX_DROP_MONITOR;
3265
3266 if (!ieee80211_is_mgmt(mgmt->frame_control))
3267 return RX_DROP_MONITOR;
3268
3269 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3270 ieee80211_is_beacon(mgmt->frame_control) &&
3271 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3272 int sig = 0;
3273
3274 /* sw bss color collision detection */
3275 ieee80211_rx_check_bss_color_collision(rx);
3276
3277 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3278 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3279 sig = status->signal;
3280
3281 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3282 rx->skb->data, rx->skb->len,
3283 ieee80211_rx_status_to_khz(status),
3284 sig);
3285 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3286 }
3287
3288 if (ieee80211_drop_unencrypted_mgmt(rx))
3289 return RX_DROP_UNUSABLE;
3290
3291 return RX_CONTINUE;
3292 }
3293
3294 static bool
3295 ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx)
3296 {
3297 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data;
3298 struct ieee80211_sub_if_data *sdata = rx->sdata;
3299
3300 /* TWT actions are only supported in AP for the moment */
3301 if (sdata->vif.type != NL80211_IFTYPE_AP)
3302 return false;
3303
3304 if (!rx->local->ops->add_twt_setup)
3305 return false;
3306
3307 if (!sdata->vif.bss_conf.twt_responder)
3308 return false;
3309
3310 if (!rx->sta)
3311 return false;
3312
3313 switch (mgmt->u.action.u.s1g.action_code) {
3314 case WLAN_S1G_TWT_SETUP: {
3315 struct ieee80211_twt_setup *twt;
3316
3317 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3318 1 + /* action code */
3319 sizeof(struct ieee80211_twt_setup) +
3320 2 /* TWT req_type agrt */)
3321 break;
3322
3323 twt = (void *)mgmt->u.action.u.s1g.variable;
3324 if (twt->element_id != WLAN_EID_S1G_TWT)
3325 break;
3326
3327 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3328 4 + /* action code + token + tlv */
3329 twt->length)
3330 break;
3331
3332 return true; /* queue the frame */
3333 }
3334 case WLAN_S1G_TWT_TEARDOWN:
3335 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2)
3336 break;
3337
3338 return true; /* queue the frame */
3339 default:
3340 break;
3341 }
3342
3343 return false;
3344 }
3345
3346 static ieee80211_rx_result debug_noinline
3347 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3348 {
3349 struct ieee80211_local *local = rx->local;
3350 struct ieee80211_sub_if_data *sdata = rx->sdata;
3351 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3352 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3353 int len = rx->skb->len;
3354
3355 if (!ieee80211_is_action(mgmt->frame_control))
3356 return RX_CONTINUE;
3357
3358 /* drop too small frames */
3359 if (len < IEEE80211_MIN_ACTION_SIZE)
3360 return RX_DROP_UNUSABLE;
3361
3362 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3363 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3364 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3365 return RX_DROP_UNUSABLE;
3366
3367 switch (mgmt->u.action.category) {
3368 case WLAN_CATEGORY_HT:
3369 /* reject HT action frames from stations not supporting HT */
3370 if (!rx->link_sta->pub->ht_cap.ht_supported)
3371 goto invalid;
3372
3373 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3374 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3375 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3376 sdata->vif.type != NL80211_IFTYPE_AP &&
3377 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3378 break;
3379
3380 /* verify action & smps_control/chanwidth are present */
3381 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3382 goto invalid;
3383
3384 switch (mgmt->u.action.u.ht_smps.action) {
3385 case WLAN_HT_ACTION_SMPS: {
3386 struct ieee80211_supported_band *sband;
3387 enum ieee80211_smps_mode smps_mode;
3388 struct sta_opmode_info sta_opmode = {};
3389
3390 if (sdata->vif.type != NL80211_IFTYPE_AP &&
3391 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3392 goto handled;
3393
3394 /* convert to HT capability */
3395 switch (mgmt->u.action.u.ht_smps.smps_control) {
3396 case WLAN_HT_SMPS_CONTROL_DISABLED:
3397 smps_mode = IEEE80211_SMPS_OFF;
3398 break;
3399 case WLAN_HT_SMPS_CONTROL_STATIC:
3400 smps_mode = IEEE80211_SMPS_STATIC;
3401 break;
3402 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3403 smps_mode = IEEE80211_SMPS_DYNAMIC;
3404 break;
3405 default:
3406 goto invalid;
3407 }
3408
3409 /* if no change do nothing */
3410 if (rx->link_sta->pub->smps_mode == smps_mode)
3411 goto handled;
3412 rx->link_sta->pub->smps_mode = smps_mode;
3413 sta_opmode.smps_mode =
3414 ieee80211_smps_mode_to_smps_mode(smps_mode);
3415 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3416
3417 sband = rx->local->hw.wiphy->bands[status->band];
3418
3419 rate_control_rate_update(local, sband, rx->sta, 0,
3420 IEEE80211_RC_SMPS_CHANGED);
3421 cfg80211_sta_opmode_change_notify(sdata->dev,
3422 rx->sta->addr,
3423 &sta_opmode,
3424 GFP_ATOMIC);
3425 goto handled;
3426 }
3427 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3428 struct ieee80211_supported_band *sband;
3429 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3430 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3431 struct sta_opmode_info sta_opmode = {};
3432
3433 /* If it doesn't support 40 MHz it can't change ... */
3434 if (!(rx->link_sta->pub->ht_cap.cap &
3435 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3436 goto handled;
3437
3438 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3439 max_bw = IEEE80211_STA_RX_BW_20;
3440 else
3441 max_bw = ieee80211_sta_cap_rx_bw(rx->link_sta);
3442
3443 /* set cur_max_bandwidth and recalc sta bw */
3444 rx->link_sta->cur_max_bandwidth = max_bw;
3445 new_bw = ieee80211_sta_cur_vht_bw(rx->link_sta);
3446
3447 if (rx->link_sta->pub->bandwidth == new_bw)
3448 goto handled;
3449
3450 rx->link_sta->pub->bandwidth = new_bw;
3451 sband = rx->local->hw.wiphy->bands[status->band];
3452 sta_opmode.bw =
3453 ieee80211_sta_rx_bw_to_chan_width(rx->link_sta);
3454 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3455
3456 rate_control_rate_update(local, sband, rx->sta, 0,
3457 IEEE80211_RC_BW_CHANGED);
3458 cfg80211_sta_opmode_change_notify(sdata->dev,
3459 rx->sta->addr,
3460 &sta_opmode,
3461 GFP_ATOMIC);
3462 goto handled;
3463 }
3464 default:
3465 goto invalid;
3466 }
3467
3468 break;
3469 case WLAN_CATEGORY_PUBLIC:
3470 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3471 goto invalid;
3472 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3473 break;
3474 if (!rx->sta)
3475 break;
3476 if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
3477 break;
3478 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3479 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3480 break;
3481 if (len < offsetof(struct ieee80211_mgmt,
3482 u.action.u.ext_chan_switch.variable))
3483 goto invalid;
3484 goto queue;
3485 case WLAN_CATEGORY_VHT:
3486 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3487 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3488 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3489 sdata->vif.type != NL80211_IFTYPE_AP &&
3490 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3491 break;
3492
3493 /* verify action code is present */
3494 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3495 goto invalid;
3496
3497 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3498 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3499 /* verify opmode is present */
3500 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3501 goto invalid;
3502 goto queue;
3503 }
3504 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3505 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3506 goto invalid;
3507 goto queue;
3508 }
3509 default:
3510 break;
3511 }
3512 break;
3513 case WLAN_CATEGORY_BACK:
3514 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3515 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3516 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3517 sdata->vif.type != NL80211_IFTYPE_AP &&
3518 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3519 break;
3520
3521 /* verify action_code is present */
3522 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3523 break;
3524
3525 switch (mgmt->u.action.u.addba_req.action_code) {
3526 case WLAN_ACTION_ADDBA_REQ:
3527 if (len < (IEEE80211_MIN_ACTION_SIZE +
3528 sizeof(mgmt->u.action.u.addba_req)))
3529 goto invalid;
3530 break;
3531 case WLAN_ACTION_ADDBA_RESP:
3532 if (len < (IEEE80211_MIN_ACTION_SIZE +
3533 sizeof(mgmt->u.action.u.addba_resp)))
3534 goto invalid;
3535 break;
3536 case WLAN_ACTION_DELBA:
3537 if (len < (IEEE80211_MIN_ACTION_SIZE +
3538 sizeof(mgmt->u.action.u.delba)))
3539 goto invalid;
3540 break;
3541 default:
3542 goto invalid;
3543 }
3544
3545 goto queue;
3546 case WLAN_CATEGORY_SPECTRUM_MGMT:
3547 /* verify action_code is present */
3548 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3549 break;
3550
3551 switch (mgmt->u.action.u.measurement.action_code) {
3552 case WLAN_ACTION_SPCT_MSR_REQ:
3553 if (status->band != NL80211_BAND_5GHZ)
3554 break;
3555
3556 if (len < (IEEE80211_MIN_ACTION_SIZE +
3557 sizeof(mgmt->u.action.u.measurement)))
3558 break;
3559
3560 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3561 break;
3562
3563 ieee80211_process_measurement_req(sdata, mgmt, len);
3564 goto handled;
3565 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3566 u8 *bssid;
3567 if (len < (IEEE80211_MIN_ACTION_SIZE +
3568 sizeof(mgmt->u.action.u.chan_switch)))
3569 break;
3570
3571 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3572 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3573 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3574 break;
3575
3576 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3577 bssid = sdata->deflink.u.mgd.bssid;
3578 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3579 bssid = sdata->u.ibss.bssid;
3580 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3581 bssid = mgmt->sa;
3582 else
3583 break;
3584
3585 if (!ether_addr_equal(mgmt->bssid, bssid))
3586 break;
3587
3588 goto queue;
3589 }
3590 }
3591 break;
3592 case WLAN_CATEGORY_SELF_PROTECTED:
3593 if (len < (IEEE80211_MIN_ACTION_SIZE +
3594 sizeof(mgmt->u.action.u.self_prot.action_code)))
3595 break;
3596
3597 switch (mgmt->u.action.u.self_prot.action_code) {
3598 case WLAN_SP_MESH_PEERING_OPEN:
3599 case WLAN_SP_MESH_PEERING_CLOSE:
3600 case WLAN_SP_MESH_PEERING_CONFIRM:
3601 if (!ieee80211_vif_is_mesh(&sdata->vif))
3602 goto invalid;
3603 if (sdata->u.mesh.user_mpm)
3604 /* userspace handles this frame */
3605 break;
3606 goto queue;
3607 case WLAN_SP_MGK_INFORM:
3608 case WLAN_SP_MGK_ACK:
3609 if (!ieee80211_vif_is_mesh(&sdata->vif))
3610 goto invalid;
3611 break;
3612 }
3613 break;
3614 case WLAN_CATEGORY_MESH_ACTION:
3615 if (len < (IEEE80211_MIN_ACTION_SIZE +
3616 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3617 break;
3618
3619 if (!ieee80211_vif_is_mesh(&sdata->vif))
3620 break;
3621 if (mesh_action_is_path_sel(mgmt) &&
3622 !mesh_path_sel_is_hwmp(sdata))
3623 break;
3624 goto queue;
3625 case WLAN_CATEGORY_S1G:
3626 switch (mgmt->u.action.u.s1g.action_code) {
3627 case WLAN_S1G_TWT_SETUP:
3628 case WLAN_S1G_TWT_TEARDOWN:
3629 if (ieee80211_process_rx_twt_action(rx))
3630 goto queue;
3631 break;
3632 default:
3633 break;
3634 }
3635 break;
3636 }
3637
3638 return RX_CONTINUE;
3639
3640 invalid:
3641 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3642 /* will return in the next handlers */
3643 return RX_CONTINUE;
3644
3645 handled:
3646 if (rx->sta)
3647 rx->link_sta->rx_stats.packets++;
3648 dev_kfree_skb(rx->skb);
3649 return RX_QUEUED;
3650
3651 queue:
3652 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3653 return RX_QUEUED;
3654 }
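/*
 * In summary, ieee80211_rx_h_action() above resolves to one of three
 * outcomes: "invalid" marks the frame IEEE80211_RX_MALFORMED_ACTION_FRM and
 * returns RX_CONTINUE so later handlers can still bounce it back to the
 * sender; "handled" means the frame was fully processed here and is freed;
 * "queue" defers the frame via ieee80211_queue_skb_to_iface() for later
 * processing outside the RX path.
 */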
3655
3656 static ieee80211_rx_result debug_noinline
3657 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3658 {
3659 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3660 struct cfg80211_rx_info info = {
3661 .freq = ieee80211_rx_status_to_khz(status),
3662 .buf = rx->skb->data,
3663 .len = rx->skb->len,
3664 .link_id = rx->link_id,
3665 .have_link_id = rx->link_id >= 0,
3666 };
3667
3668 /* skip known-bad action frames and return them in the next handler */
3669 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3670 return RX_CONTINUE;
3671
3672 /*
3673 * Getting here means the kernel doesn't know how to handle
3674 * it, but maybe userspace does ... include returned frames
3675 	 * so userspace can register for them and know whether frames
3676 	 * it transmitted were processed or returned.
3677 */
3678
3679 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3680 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3681 info.sig_dbm = status->signal;
3682
3683 if (ieee80211_is_timing_measurement(rx->skb) ||
3684 ieee80211_is_ftm(rx->skb)) {
3685 info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp);
3686 info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp);
3687 }
3688
3689 if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) {
3690 if (rx->sta)
3691 rx->link_sta->rx_stats.packets++;
3692 dev_kfree_skb(rx->skb);
3693 return RX_QUEUED;
3694 }
3695
3696 return RX_CONTINUE;
3697 }
3698
3699 static ieee80211_rx_result debug_noinline
3700 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3701 {
3702 struct ieee80211_sub_if_data *sdata = rx->sdata;
3703 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3704 int len = rx->skb->len;
3705
3706 if (!ieee80211_is_action(mgmt->frame_control))
3707 return RX_CONTINUE;
3708
3709 switch (mgmt->u.action.category) {
3710 case WLAN_CATEGORY_SA_QUERY:
3711 if (len < (IEEE80211_MIN_ACTION_SIZE +
3712 sizeof(mgmt->u.action.u.sa_query)))
3713 break;
3714
3715 switch (mgmt->u.action.u.sa_query.action) {
3716 case WLAN_ACTION_SA_QUERY_REQUEST:
3717 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3718 break;
3719 ieee80211_process_sa_query_req(sdata, mgmt, len);
3720 goto handled;
3721 }
3722 break;
3723 }
3724
3725 return RX_CONTINUE;
3726
3727 handled:
3728 if (rx->sta)
3729 rx->link_sta->rx_stats.packets++;
3730 dev_kfree_skb(rx->skb);
3731 return RX_QUEUED;
3732 }
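/*
 * Note that SA Query requests are only answered here, i.e. after
 * ieee80211_rx_h_userspace_mgmt() has had a chance to deliver the frame to
 * userspace (see the handler ordering in ieee80211_rx_handlers()).
 */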
3733
3734 static ieee80211_rx_result debug_noinline
3735 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3736 {
3737 struct ieee80211_local *local = rx->local;
3738 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3739 struct sk_buff *nskb;
3740 struct ieee80211_sub_if_data *sdata = rx->sdata;
3741 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3742
3743 if (!ieee80211_is_action(mgmt->frame_control))
3744 return RX_CONTINUE;
3745
3746 /*
3747 * For AP mode, hostapd is responsible for handling any action
3748 * frames that we didn't handle, including returning unknown
3749 * ones. For all other modes we will return them to the sender,
3750 * setting the 0x80 bit in the action category, as required by
3751 * 802.11-2012 9.24.4.
3752 	 * Newer versions of hostapd also use the management frame
3753 	 * registration mechanisms, but older ones still use cooked
3754 	 * monitor interfaces, so push all frames there.
3755 */
3756 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3757 (sdata->vif.type == NL80211_IFTYPE_AP ||
3758 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3759 return RX_DROP_MONITOR;
3760
3761 if (is_multicast_ether_addr(mgmt->da))
3762 return RX_DROP_MONITOR;
3763
3764 /* do not return rejected action frames */
3765 if (mgmt->u.action.category & 0x80)
3766 return RX_DROP_UNUSABLE;
3767
3768 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3769 GFP_ATOMIC);
3770 if (nskb) {
3771 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3772
3773 nmgmt->u.action.category |= 0x80;
3774 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3775 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3776
3777 memset(nskb->cb, 0, sizeof(nskb->cb));
3778
3779 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3780 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3781
3782 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3783 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3784 IEEE80211_TX_CTL_NO_CCK_RATE;
3785 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3786 info->hw_queue =
3787 local->hw.offchannel_tx_hw_queue;
3788 }
3789
3790 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1,
3791 status->band);
3792 }
3793 dev_kfree_skb(rx->skb);
3794 return RX_QUEUED;
3795 }
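/*
 * A rough sketch of the "return" transformation above for a unicast action
 * frame received from peer address P on an interface with address A:
 *
 *	RX frame:  DA = A, SA = P, category = C
 *	TX copy:   DA = P, SA = A, category = C | 0x80
 *
 * i.e. the frame is bounced back to the sender with the "rejected" bit set
 * in the category field, as described in the comment above.
 */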
3796
3797 static ieee80211_rx_result debug_noinline
3798 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
3799 {
3800 struct ieee80211_sub_if_data *sdata = rx->sdata;
3801 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
3802
3803 if (!ieee80211_is_ext(hdr->frame_control))
3804 return RX_CONTINUE;
3805
3806 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3807 return RX_DROP_MONITOR;
3808
3809 /* for now only beacons are ext, so queue them */
3810 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3811
3812 return RX_QUEUED;
3813 }
3814
3815 static ieee80211_rx_result debug_noinline
3816 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3817 {
3818 struct ieee80211_sub_if_data *sdata = rx->sdata;
3819 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3820 __le16 stype;
3821
3822 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3823
3824 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3825 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3826 sdata->vif.type != NL80211_IFTYPE_OCB &&
3827 sdata->vif.type != NL80211_IFTYPE_STATION)
3828 return RX_DROP_MONITOR;
3829
3830 switch (stype) {
3831 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3832 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3833 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3834 /* process for all: mesh, mlme, ibss */
3835 break;
3836 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3837 if (is_multicast_ether_addr(mgmt->da) &&
3838 !is_broadcast_ether_addr(mgmt->da))
3839 return RX_DROP_MONITOR;
3840
3841 /* process only for station/IBSS */
3842 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3843 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3844 return RX_DROP_MONITOR;
3845 break;
3846 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3847 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3848 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3849 if (is_multicast_ether_addr(mgmt->da) &&
3850 !is_broadcast_ether_addr(mgmt->da))
3851 return RX_DROP_MONITOR;
3852
3853 /* process only for station */
3854 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3855 return RX_DROP_MONITOR;
3856 break;
3857 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3858 /* process only for ibss and mesh */
3859 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3860 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3861 return RX_DROP_MONITOR;
3862 break;
3863 default:
3864 return RX_DROP_MONITOR;
3865 }
3866
3867 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3868
3869 return RX_QUEUED;
3870 }
3871
3872 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3873 struct ieee80211_rate *rate)
3874 {
3875 struct ieee80211_sub_if_data *sdata;
3876 struct ieee80211_local *local = rx->local;
3877 struct sk_buff *skb = rx->skb, *skb2;
3878 struct net_device *prev_dev = NULL;
3879 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3880 int needed_headroom;
3881
3882 /*
3883 * If cooked monitor has been processed already, then
3884 * don't do it again. If not, set the flag.
3885 */
3886 if (rx->flags & IEEE80211_RX_CMNTR)
3887 goto out_free_skb;
3888 rx->flags |= IEEE80211_RX_CMNTR;
3889
3890 /* If there are no cooked monitor interfaces, just free the SKB */
3891 if (!local->cooked_mntrs)
3892 goto out_free_skb;
3893
3894 	/* vendor data has long since been removed at this point */
3895 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3896 /* room for the radiotap header based on driver features */
3897 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3898
3899 if (skb_headroom(skb) < needed_headroom &&
3900 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3901 goto out_free_skb;
3902
3903 /* prepend radiotap information */
3904 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3905 false);
3906
3907 skb_reset_mac_header(skb);
3908 skb->ip_summed = CHECKSUM_UNNECESSARY;
3909 skb->pkt_type = PACKET_OTHERHOST;
3910 skb->protocol = htons(ETH_P_802_2);
3911
3912 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3913 if (!ieee80211_sdata_running(sdata))
3914 continue;
3915
3916 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3917 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3918 continue;
3919
3920 if (prev_dev) {
3921 skb2 = skb_clone(skb, GFP_ATOMIC);
3922 if (skb2) {
3923 skb2->dev = prev_dev;
3924 netif_receive_skb(skb2);
3925 }
3926 }
3927
3928 prev_dev = sdata->dev;
3929 dev_sw_netstats_rx_add(sdata->dev, skb->len);
3930 }
3931
3932 if (prev_dev) {
3933 skb->dev = prev_dev;
3934 netif_receive_skb(skb);
3935 return;
3936 }
3937
3938 out_free_skb:
3939 dev_kfree_skb(skb);
3940 }
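/*
 * Note on the delivery loop above: the frame is cloned for every cooked
 * monitor interface except the last one, which receives the original skb;
 * if no interface with MONITOR_FLAG_COOK_FRAMES exists, the skb is freed.
 */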
3941
3942 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3943 ieee80211_rx_result res)
3944 {
3945 switch (res) {
3946 case RX_DROP_MONITOR:
3947 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3948 if (rx->sta)
3949 rx->link_sta->rx_stats.dropped++;
3950 fallthrough;
3951 case RX_CONTINUE: {
3952 struct ieee80211_rate *rate = NULL;
3953 struct ieee80211_supported_band *sband;
3954 struct ieee80211_rx_status *status;
3955
3956 status = IEEE80211_SKB_RXCB((rx->skb));
3957
3958 sband = rx->local->hw.wiphy->bands[status->band];
3959 if (status->encoding == RX_ENC_LEGACY)
3960 rate = &sband->bitrates[status->rate_idx];
3961
3962 ieee80211_rx_cooked_monitor(rx, rate);
3963 break;
3964 }
3965 case RX_DROP_UNUSABLE:
3966 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3967 if (rx->sta)
3968 rx->link_sta->rx_stats.dropped++;
3969 dev_kfree_skb(rx->skb);
3970 break;
3971 case RX_QUEUED:
3972 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3973 break;
3974 }
3975 }
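/*
 * Summary of how the handler results are treated above:
 *
 *	RX_QUEUED		a handler consumed the frame, just count it
 *	RX_CONTINUE		no handler consumed it, give it to cooked monitor
 *	RX_DROP_MONITOR		count the drop, then also give it to cooked monitor
 *	RX_DROP_UNUSABLE	count the drop and free the frame immediately
 */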
3976
3977 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3978 struct sk_buff_head *frames)
3979 {
3980 ieee80211_rx_result res = RX_DROP_MONITOR;
3981 struct sk_buff *skb;
3982
3983 #define CALL_RXH(rxh) \
3984 do { \
3985 res = rxh(rx); \
3986 if (res != RX_CONTINUE) \
3987 goto rxh_next; \
3988 } while (0)
3989
3990 /* Lock here to avoid hitting all of the data used in the RX
3991 * path (e.g. key data, station data, ...) concurrently when
3992 * a frame is released from the reorder buffer due to timeout
3993 * from the timer, potentially concurrently with RX from the
3994 * driver.
3995 */
3996 spin_lock_bh(&rx->local->rx_path_lock);
3997
3998 while ((skb = __skb_dequeue(frames))) {
3999 /*
4000 * all the other fields are valid across frames
4001 		 * that belong to an A-MPDU since they are on the
4002 * same TID from the same station
4003 */
4004 rx->skb = skb;
4005
4006 if (WARN_ON_ONCE(!rx->link))
4007 goto rxh_next;
4008
4009 CALL_RXH(ieee80211_rx_h_check_more_data);
4010 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
4011 CALL_RXH(ieee80211_rx_h_sta_process);
4012 CALL_RXH(ieee80211_rx_h_decrypt);
4013 CALL_RXH(ieee80211_rx_h_defragment);
4014 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
4015 /* must be after MMIC verify so header is counted in MPDU mic */
4016 #ifdef CONFIG_MAC80211_MESH
4017 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
4018 CALL_RXH(ieee80211_rx_h_mesh_fwding);
4019 #endif
4020 CALL_RXH(ieee80211_rx_h_amsdu);
4021 CALL_RXH(ieee80211_rx_h_data);
4022
4023 /* special treatment -- needs the queue */
4024 res = ieee80211_rx_h_ctrl(rx, frames);
4025 if (res != RX_CONTINUE)
4026 goto rxh_next;
4027
4028 CALL_RXH(ieee80211_rx_h_mgmt_check);
4029 CALL_RXH(ieee80211_rx_h_action);
4030 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
4031 CALL_RXH(ieee80211_rx_h_action_post_userspace);
4032 CALL_RXH(ieee80211_rx_h_action_return);
4033 CALL_RXH(ieee80211_rx_h_ext);
4034 CALL_RXH(ieee80211_rx_h_mgmt);
4035
4036 rxh_next:
4037 ieee80211_rx_handlers_result(rx, res);
4038
4039 #undef CALL_RXH
4040 }
4041
4042 spin_unlock_bh(&rx->local->rx_path_lock);
4043 }
4044
4045 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
4046 {
4047 struct sk_buff_head reorder_release;
4048 ieee80211_rx_result res = RX_DROP_MONITOR;
4049
4050 __skb_queue_head_init(&reorder_release);
4051
4052 #define CALL_RXH(rxh) \
4053 do { \
4054 res = rxh(rx); \
4055 if (res != RX_CONTINUE) \
4056 goto rxh_next; \
4057 } while (0)
4058
4059 CALL_RXH(ieee80211_rx_h_check_dup);
4060 CALL_RXH(ieee80211_rx_h_check);
4061
4062 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
4063
4064 ieee80211_rx_handlers(rx, &reorder_release);
4065 return;
4066
4067 rxh_next:
4068 ieee80211_rx_handlers_result(rx, res);
4069
4070 #undef CALL_RXH
4071 }
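/*
 * Note the two-stage structure above: only the duplicate and basic sanity
 * checks run before A-MPDU reordering; all remaining handlers run in
 * ieee80211_rx_handlers() on frames as they are released from the reorder
 * buffer, possibly much later from the reorder timer.
 */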
4072
4073 static bool
4074 ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
4075 {
4076 if (!sta->mlo)
4077 return false;
4078
4079 return !!(sta->valid_links & BIT(link_id));
4080 }
4081
4082 static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
4083 u8 link_id)
4084 {
4085 rx->link_id = link_id;
4086 rx->link = rcu_dereference(rx->sdata->link[link_id]);
4087
4088 if (!rx->sta)
4089 return rx->link;
4090
4091 if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
4092 return false;
4093
4094 rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
4095
4096 return rx->link && rx->link_sta;
4097 }
4098
4099 static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
4100 struct ieee80211_sta *pubsta,
4101 int link_id)
4102 {
4103 struct sta_info *sta;
4104
4105 sta = container_of(pubsta, struct sta_info, sta);
4106
4107 rx->link_id = link_id;
4108 rx->sta = sta;
4109
4110 if (sta) {
4111 rx->local = sta->sdata->local;
4112 if (!rx->sdata)
4113 rx->sdata = sta->sdata;
4114 rx->link_sta = &sta->deflink;
4115 }
4116
4117 if (link_id < 0)
4118 rx->link = &rx->sdata->deflink;
4119 else if (!ieee80211_rx_data_set_link(rx, link_id))
4120 return false;
4121
4122 return true;
4123 }
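/*
 * A negative link_id above means "no specific link": rx->link then points
 * at the interface's deflink and, when a station is given, rx->link_sta at
 * the station's deflink, which is what non-MLO stations use anyway.
 */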
4124
4125 /*
4126 * This function makes calls into the RX path, therefore
4127 * it has to be invoked under RCU read lock.
4128 */
4129 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
4130 {
4131 struct sk_buff_head frames;
4132 struct ieee80211_rx_data rx = {
4133 /* This is OK -- must be QoS data frame */
4134 .security_idx = tid,
4135 .seqno_idx = tid,
4136 };
4137 struct tid_ampdu_rx *tid_agg_rx;
4138 int link_id = -1;
4139
4140 /* FIXME: statistics won't be right with this */
4141 if (sta->sta.valid_links)
4142 link_id = ffs(sta->sta.valid_links) - 1;
4143
4144 if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
4145 return;
4146
4147 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4148 if (!tid_agg_rx)
4149 return;
4150
4151 __skb_queue_head_init(&frames);
4152
4153 spin_lock(&tid_agg_rx->reorder_lock);
4154 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4155 spin_unlock(&tid_agg_rx->reorder_lock);
4156
4157 if (!skb_queue_empty(&frames)) {
4158 struct ieee80211_event event = {
4159 .type = BA_FRAME_TIMEOUT,
4160 .u.ba.tid = tid,
4161 .u.ba.sta = &sta->sta,
4162 };
4163 drv_event_callback(rx.local, rx.sdata, &event);
4164 }
4165
4166 ieee80211_rx_handlers(&rx, &frames);
4167 }
4168
4169 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
4170 u16 ssn, u64 filtered,
4171 u16 received_mpdus)
4172 {
4173 struct sta_info *sta;
4174 struct tid_ampdu_rx *tid_agg_rx;
4175 struct sk_buff_head frames;
4176 struct ieee80211_rx_data rx = {
4177 /* This is OK -- must be QoS data frame */
4178 .security_idx = tid,
4179 .seqno_idx = tid,
4180 };
4181 int i, diff;
4182
4183 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
4184 return;
4185
4186 __skb_queue_head_init(&frames);
4187
4188 sta = container_of(pubsta, struct sta_info, sta);
4189
4190 if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
4191 return;
4192
4193 rcu_read_lock();
4194 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4195 if (!tid_agg_rx)
4196 goto out;
4197
4198 spin_lock_bh(&tid_agg_rx->reorder_lock);
4199
4200 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
4201 int release;
4202
4203 /* release all frames in the reorder buffer */
4204 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
4205 IEEE80211_SN_MODULO;
4206 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
4207 release, &frames);
4208 /* update ssn to match received ssn */
4209 tid_agg_rx->head_seq_num = ssn;
4210 } else {
4211 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
4212 &frames);
4213 }
4214
4215 	/* Handle the case where the received ssn is behind the mac ssn.
4216 	 * It can be tid_agg_rx->buf_size behind and still be valid. */
4217 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
4218 if (diff >= tid_agg_rx->buf_size) {
4219 tid_agg_rx->reorder_buf_filtered = 0;
4220 goto release;
4221 }
4222 filtered = filtered >> diff;
4223 ssn += diff;
4224
4225 /* update bitmap */
4226 for (i = 0; i < tid_agg_rx->buf_size; i++) {
4227 int index = (ssn + i) % tid_agg_rx->buf_size;
4228
4229 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
4230 if (filtered & BIT_ULL(i))
4231 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
4232 }
4233
4234 /* now process also frames that the filter marking released */
4235 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4236
4237 release:
4238 spin_unlock_bh(&tid_agg_rx->reorder_lock);
4239
4240 ieee80211_rx_handlers(&rx, &frames);
4241
4242 out:
4243 rcu_read_unlock();
4244 }
4245 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
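/*
 * Worked example for the bitmap adjustment above: if the reported ssn is
 * two sequence numbers behind head_seq_num, diff == 2 and the driver's
 * "filtered" mask is shifted right by two, so that bit i afterwards
 * describes sequence number head_seq_num + i and is stored at reorder
 * buffer index (head_seq_num + i) % buf_size.
 */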
4246
4247 /* main receive path */
4248
4249 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
4250 {
4251 return ether_addr_equal(raddr, addr) ||
4252 is_broadcast_ether_addr(raddr);
4253 }
4254
4255 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
4256 {
4257 struct ieee80211_sub_if_data *sdata = rx->sdata;
4258 struct sk_buff *skb = rx->skb;
4259 struct ieee80211_hdr *hdr = (void *)skb->data;
4260 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4261 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
4262 bool multicast = is_multicast_ether_addr(hdr->addr1) ||
4263 ieee80211_is_s1g_beacon(hdr->frame_control);
4264
4265 switch (sdata->vif.type) {
4266 case NL80211_IFTYPE_STATION:
4267 if (!bssid && !sdata->u.mgd.use_4addr)
4268 return false;
4269 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
4270 return false;
4271 if (multicast)
4272 return true;
4273 return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id);
4274 case NL80211_IFTYPE_ADHOC:
4275 if (!bssid)
4276 return false;
4277 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
4278 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
4279 !is_valid_ether_addr(hdr->addr2))
4280 return false;
4281 if (ieee80211_is_beacon(hdr->frame_control))
4282 return true;
4283 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
4284 return false;
4285 if (!multicast &&
4286 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4287 return false;
4288 if (!rx->sta) {
4289 int rate_idx;
4290 if (status->encoding != RX_ENC_LEGACY)
4291 rate_idx = 0; /* TODO: HT/VHT rates */
4292 else
4293 rate_idx = status->rate_idx;
4294 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
4295 BIT(rate_idx));
4296 }
4297 return true;
4298 case NL80211_IFTYPE_OCB:
4299 if (!bssid)
4300 return false;
4301 if (!ieee80211_is_data_present(hdr->frame_control))
4302 return false;
4303 if (!is_broadcast_ether_addr(bssid))
4304 return false;
4305 if (!multicast &&
4306 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4307 return false;
4308 if (!rx->sta) {
4309 int rate_idx;
4310 if (status->encoding != RX_ENC_LEGACY)
4311 rate_idx = 0; /* TODO: HT rates */
4312 else
4313 rate_idx = status->rate_idx;
4314 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4315 BIT(rate_idx));
4316 }
4317 return true;
4318 case NL80211_IFTYPE_MESH_POINT:
4319 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4320 return false;
4321 if (multicast)
4322 return true;
4323 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4324 case NL80211_IFTYPE_AP_VLAN:
4325 case NL80211_IFTYPE_AP:
4326 if (!bssid)
4327 return ieee80211_is_our_addr(sdata, hdr->addr1,
4328 &rx->link_id);
4329
4330 if (!is_broadcast_ether_addr(bssid) &&
4331 !ieee80211_is_our_addr(sdata, bssid, NULL)) {
4332 /*
4333 * Accept public action frames even when the
4334 * BSSID doesn't match, this is used for P2P
4335 * and location updates. Note that mac80211
4336 * itself never looks at these frames.
4337 */
4338 if (!multicast &&
4339 !ieee80211_is_our_addr(sdata, hdr->addr1,
4340 &rx->link_id))
4341 return false;
4342 if (ieee80211_is_public_action(hdr, skb->len))
4343 return true;
4344 return ieee80211_is_beacon(hdr->frame_control);
4345 }
4346
4347 if (!ieee80211_has_tods(hdr->frame_control)) {
4348 /* ignore data frames to TDLS-peers */
4349 if (ieee80211_is_data(hdr->frame_control))
4350 return false;
4351 /* ignore action frames to TDLS-peers */
4352 if (ieee80211_is_action(hdr->frame_control) &&
4353 !is_broadcast_ether_addr(bssid) &&
4354 !ether_addr_equal(bssid, hdr->addr1))
4355 return false;
4356 }
4357
4358 /*
4359 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4360 * the BSSID - we've checked that already but may have accepted
4361 * the wildcard (ff:ff:ff:ff:ff:ff).
4362 *
4363 * It also says:
4364 * The BSSID of the Data frame is determined as follows:
4365 * a) If the STA is contained within an AP or is associated
4366 * with an AP, the BSSID is the address currently in use
4367 * by the STA contained in the AP.
4368 *
4369 * So we should not accept data frames with an address that's
4370 * multicast.
4371 *
4372 * Accepting it also opens a security problem because stations
4373 * could encrypt it with the GTK and inject traffic that way.
4374 */
4375 if (ieee80211_is_data(hdr->frame_control) && multicast)
4376 return false;
4377
4378 return true;
4379 case NL80211_IFTYPE_P2P_DEVICE:
4380 return ieee80211_is_public_action(hdr, skb->len) ||
4381 ieee80211_is_probe_req(hdr->frame_control) ||
4382 ieee80211_is_probe_resp(hdr->frame_control) ||
4383 ieee80211_is_beacon(hdr->frame_control);
4384 case NL80211_IFTYPE_NAN:
4385 /* Currently no frames on NAN interface are allowed */
4386 return false;
4387 default:
4388 break;
4389 }
4390
4391 WARN_ON_ONCE(1);
4392 return false;
4393 }
4394
4395 void ieee80211_check_fast_rx(struct sta_info *sta)
4396 {
4397 struct ieee80211_sub_if_data *sdata = sta->sdata;
4398 struct ieee80211_local *local = sdata->local;
4399 struct ieee80211_key *key;
4400 struct ieee80211_fast_rx fastrx = {
4401 .dev = sdata->dev,
4402 .vif_type = sdata->vif.type,
4403 .control_port_protocol = sdata->control_port_protocol,
4404 }, *old, *new = NULL;
4405 u32 offload_flags;
4406 bool set_offload = false;
4407 bool assign = false;
4408 bool offload;
4409
4410 /* use sparse to check that we don't return without updating */
4411 __acquire(check_fast_rx);
4412
4413 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4414 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4415 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4416 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4417
4418 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4419
4420 /* fast-rx doesn't do reordering */
4421 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4422 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4423 goto clear;
4424
4425 switch (sdata->vif.type) {
4426 case NL80211_IFTYPE_STATION:
4427 if (sta->sta.tdls) {
4428 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4429 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4430 fastrx.expected_ds_bits = 0;
4431 } else {
4432 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4433 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4434 fastrx.expected_ds_bits =
4435 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4436 }
4437
4438 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4439 fastrx.expected_ds_bits |=
4440 cpu_to_le16(IEEE80211_FCTL_TODS);
4441 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4442 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4443 }
4444
4445 if (!sdata->u.mgd.powersave)
4446 break;
4447
4448 /* software powersave is a huge mess, avoid all of it */
4449 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4450 goto clear;
4451 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4452 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4453 goto clear;
4454 break;
4455 case NL80211_IFTYPE_AP_VLAN:
4456 case NL80211_IFTYPE_AP:
4457 /* parallel-rx requires this, at least with calls to
4458 * ieee80211_sta_ps_transition()
4459 */
4460 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4461 goto clear;
4462 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4463 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4464 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4465
4466 fastrx.internal_forward =
4467 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4468 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4469 !sdata->u.vlan.sta);
4470
4471 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4472 sdata->u.vlan.sta) {
4473 fastrx.expected_ds_bits |=
4474 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4475 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4476 fastrx.internal_forward = 0;
4477 }
4478
4479 break;
4480 default:
4481 goto clear;
4482 }
4483
4484 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4485 goto clear;
4486
4487 rcu_read_lock();
4488 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4489 if (!key)
4490 key = rcu_dereference(sdata->default_unicast_key);
4491 if (key) {
4492 switch (key->conf.cipher) {
4493 case WLAN_CIPHER_SUITE_TKIP:
4494 /* we don't want to deal with MMIC in fast-rx */
4495 goto clear_rcu;
4496 case WLAN_CIPHER_SUITE_CCMP:
4497 case WLAN_CIPHER_SUITE_CCMP_256:
4498 case WLAN_CIPHER_SUITE_GCMP:
4499 case WLAN_CIPHER_SUITE_GCMP_256:
4500 break;
4501 default:
4502 /* We also don't want to deal with
4503 * WEP or cipher scheme.
4504 */
4505 goto clear_rcu;
4506 }
4507
4508 fastrx.key = true;
4509 fastrx.icv_len = key->conf.icv_len;
4510 }
4511
4512 assign = true;
4513 clear_rcu:
4514 rcu_read_unlock();
4515 clear:
4516 __release(check_fast_rx);
4517
4518 if (assign)
4519 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4520
4521 offload_flags = get_bss_sdata(sdata)->vif.offload_flags;
4522 offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED;
4523
4524 if (assign && offload)
4525 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4526 else
4527 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4528
4529 if (set_offload)
4530 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign);
4531
4532 spin_lock_bh(&sta->lock);
4533 old = rcu_dereference_protected(sta->fast_rx, true);
4534 rcu_assign_pointer(sta->fast_rx, new);
4535 spin_unlock_bh(&sta->lock);
4536
4537 if (old)
4538 kfree_rcu(old, rcu_head);
4539 }
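/*
 * Roughly, fast-rx is only offered to authorized stations on station, AP and
 * AP_VLAN interfaces, without software powersave or software reordering in
 * the picture, and, if a key is set, only for CCMP/GCMP ciphers; anything
 * else clears sta->fast_rx and takes the regular handler path.
 */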
4540
4541 void ieee80211_clear_fast_rx(struct sta_info *sta)
4542 {
4543 struct ieee80211_fast_rx *old;
4544
4545 spin_lock_bh(&sta->lock);
4546 old = rcu_dereference_protected(sta->fast_rx, true);
4547 RCU_INIT_POINTER(sta->fast_rx, NULL);
4548 spin_unlock_bh(&sta->lock);
4549
4550 if (old)
4551 kfree_rcu(old, rcu_head);
4552 }
4553
4554 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4555 {
4556 struct ieee80211_local *local = sdata->local;
4557 struct sta_info *sta;
4558
4559 lockdep_assert_held(&local->sta_mtx);
4560
4561 list_for_each_entry(sta, &local->sta_list, list) {
4562 if (sdata != sta->sdata &&
4563 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4564 continue;
4565 ieee80211_check_fast_rx(sta);
4566 }
4567 }
4568
4569 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4570 {
4571 struct ieee80211_local *local = sdata->local;
4572
4573 mutex_lock(&local->sta_mtx);
4574 __ieee80211_check_fast_rx_iface(sdata);
4575 mutex_unlock(&local->sta_mtx);
4576 }
4577
4578 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
4579 struct ieee80211_fast_rx *fast_rx,
4580 int orig_len)
4581 {
4582 struct ieee80211_sta_rx_stats *stats;
4583 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4584 struct sta_info *sta = rx->sta;
4585 struct link_sta_info *link_sta;
4586 struct sk_buff *skb = rx->skb;
4587 void *sa = skb->data + ETH_ALEN;
4588 void *da = skb->data;
4589
4590 if (rx->link_id >= 0) {
4591 link_sta = rcu_dereference(sta->link[rx->link_id]);
4592 if (WARN_ON_ONCE(!link_sta)) {
4593 dev_kfree_skb(rx->skb);
4594 return;
4595 }
4596 } else {
4597 link_sta = &sta->deflink;
4598 }
4599
4600 stats = &link_sta->rx_stats;
4601 if (fast_rx->uses_rss)
4602 stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
4603
4604 /* statistics part of ieee80211_rx_h_sta_process() */
4605 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4606 stats->last_signal = status->signal;
4607 if (!fast_rx->uses_rss)
4608 ewma_signal_add(&link_sta->rx_stats_avg.signal,
4609 -status->signal);
4610 }
4611
4612 if (status->chains) {
4613 int i;
4614
4615 stats->chains = status->chains;
4616 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4617 int signal = status->chain_signal[i];
4618
4619 if (!(status->chains & BIT(i)))
4620 continue;
4621
4622 stats->chain_signal_last[i] = signal;
4623 if (!fast_rx->uses_rss)
4624 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
4625 -signal);
4626 }
4627 }
4628 /* end of statistics */
4629
4630 stats->last_rx = jiffies;
4631 stats->last_rate = sta_stats_encode_rate(status);
4632
4633 stats->fragments++;
4634 stats->packets++;
4635
4636 skb->dev = fast_rx->dev;
4637
4638 dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4639
4640 /* The seqno index has the same property as needed
4641 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4642 * for non-QoS-data frames. Here we know it's a data
4643 * frame, so count MSDUs.
4644 */
4645 u64_stats_update_begin(&stats->syncp);
4646 stats->msdu[rx->seqno_idx]++;
4647 stats->bytes += orig_len;
4648 u64_stats_update_end(&stats->syncp);
4649
4650 if (fast_rx->internal_forward) {
4651 struct sk_buff *xmit_skb = NULL;
4652 if (is_multicast_ether_addr(da)) {
4653 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4654 } else if (!ether_addr_equal(da, sa) &&
4655 sta_info_get(rx->sdata, da)) {
4656 xmit_skb = skb;
4657 skb = NULL;
4658 }
4659
4660 if (xmit_skb) {
4661 /*
4662 * Send to wireless media and increase priority by 256
4663 * to keep the received priority instead of
4664 * reclassifying the frame (see cfg80211_classify8021d).
4665 */
4666 xmit_skb->priority += 256;
4667 xmit_skb->protocol = htons(ETH_P_802_3);
4668 skb_reset_network_header(xmit_skb);
4669 skb_reset_mac_header(xmit_skb);
4670 dev_queue_xmit(xmit_skb);
4671 }
4672
4673 if (!skb)
4674 return;
4675 }
4676
4677 /* deliver to local stack */
4678 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4679 ieee80211_deliver_skb_to_local_stack(skb, rx);
4680 }
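/*
 * Note on the internal_forward path above: multicast frames are copied and
 * bridged back onto the wireless medium in addition to local delivery,
 * while unicast frames destined for another station known on this
 * interface are only re-transmitted and never reach the local stack.
 */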
4681
4682 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4683 struct ieee80211_fast_rx *fast_rx)
4684 {
4685 struct sk_buff *skb = rx->skb;
4686 struct ieee80211_hdr *hdr = (void *)skb->data;
4687 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4688 int orig_len = skb->len;
4689 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4690 int snap_offs = hdrlen;
4691 struct {
4692 u8 snap[sizeof(rfc1042_header)];
4693 __be16 proto;
4694 } *payload __aligned(2);
4695 struct {
4696 u8 da[ETH_ALEN];
4697 u8 sa[ETH_ALEN];
4698 } addrs __aligned(2);
4699 struct ieee80211_sta_rx_stats *stats;
4700
4701 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4702 * to a common data structure; drivers can implement that per queue
4703 * but we don't have that information in mac80211
4704 */
4705 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4706 return false;
4707
4708 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4709
4710 /* If using encryption, we also need to have:
4711 * - PN_VALIDATED: similar, but the implementation is tricky
4712 * - DECRYPTED: necessary for PN_VALIDATED
4713 */
4714 if (fast_rx->key &&
4715 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4716 return false;
4717
4718 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4719 return false;
4720
4721 if (unlikely(ieee80211_is_frag(hdr)))
4722 return false;
4723
4724 /* Since our interface address cannot be multicast, this
4725 * implicitly also rejects multicast frames without the
4726 * explicit check.
4727 *
4728 * We shouldn't get any *data* frames not addressed to us
4729 * (AP mode will accept multicast *management* frames), but
4730 * punting here will make it go through the full checks in
4731 * ieee80211_accept_frame().
4732 */
4733 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4734 return false;
4735
4736 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4737 IEEE80211_FCTL_TODS)) !=
4738 fast_rx->expected_ds_bits)
4739 return false;
4740
4741 /* assign the key to drop unencrypted frames (later)
4742 * and strip the IV/MIC if necessary
4743 */
4744 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4745 /* GCMP header length is the same */
4746 snap_offs += IEEE80211_CCMP_HDR_LEN;
4747 }
4748
4749 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4750 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4751 return false;
4752
4753 payload = (void *)(skb->data + snap_offs);
4754
4755 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4756 return false;
4757
4758 /* Don't handle these here since they require special code.
4759 * Accept AARP and IPX even though they should come with a
4760 * bridge-tunnel header - but if we get them this way then
4761 * there's little point in discarding them.
4762 */
4763 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4764 payload->proto == fast_rx->control_port_protocol))
4765 return false;
4766 }
4767
4768 /* after this point, don't punt to the slowpath! */
4769
4770 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4771 pskb_trim(skb, skb->len - fast_rx->icv_len))
4772 goto drop;
4773
4774 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4775 goto drop;
4776
4777 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4778 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4779 RX_QUEUED)
4780 goto drop;
4781
4782 return true;
4783 }
4784
4785 /* do the header conversion - first grab the addresses */
4786 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4787 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4788 skb_postpull_rcsum(skb, skb->data + snap_offs,
4789 sizeof(rfc1042_header) + 2);
4790 /* remove the SNAP but leave the ethertype */
4791 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4792 /* push the addresses in front */
4793 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4794
4795 ieee80211_rx_8023(rx, fast_rx, orig_len);
4796
4797 return true;
4798 drop:
4799 dev_kfree_skb(skb);
4800
4801 if (fast_rx->uses_rss)
4802 stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
4803 else
4804 stats = &rx->link_sta->rx_stats;
4805
4806 stats->dropped++;
4807 return true;
4808 }
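/*
 * Sketch of the 802.11 -> 802.3 conversion performed above for the
 * non-A-MSDU case:
 *
 *	before:	[802.11 hdr][CCMP/GCMP hdr, if any][RFC 1042 SNAP][ethertype][payload]
 *	after:	[DA][SA][ethertype][payload]
 *
 * with DA/SA taken from the header offsets precomputed per interface type
 * in ieee80211_check_fast_rx().
 */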
4809
4810 /*
4811 * This function returns whether or not the SKB
4812 * was destined for RX processing or not, which,
4813 * if consume is true, is equivalent to whether
4814 * or not the skb was consumed.
4815 */
4816 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4817 struct sk_buff *skb, bool consume)
4818 {
4819 struct ieee80211_local *local = rx->local;
4820 struct ieee80211_sub_if_data *sdata = rx->sdata;
4821 struct ieee80211_hdr *hdr = (void *)skb->data;
4822 struct link_sta_info *link_sta = rx->link_sta;
4823 struct ieee80211_link_data *link = rx->link;
4824
4825 rx->skb = skb;
4826
4827 /* See if we can do fast-rx; if we have to copy we already lost,
4828 * so punt in that case. We should never have to deliver a data
4829 * frame to multiple interfaces anyway.
4830 *
4831 * We skip the ieee80211_accept_frame() call and do the necessary
4832 * checking inside ieee80211_invoke_fast_rx().
4833 */
4834 if (consume && rx->sta) {
4835 struct ieee80211_fast_rx *fast_rx;
4836
4837 fast_rx = rcu_dereference(rx->sta->fast_rx);
4838 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4839 return true;
4840 }
4841
4842 if (!ieee80211_accept_frame(rx))
4843 return false;
4844
4845 if (!consume) {
4846 struct skb_shared_hwtstamps *shwt;
4847
4848 rx->skb = skb_copy(skb, GFP_ATOMIC);
4849 if (!rx->skb) {
4850 if (net_ratelimit())
4851 wiphy_debug(local->hw.wiphy,
4852 "failed to copy skb for %s\n",
4853 sdata->name);
4854 return true;
4855 }
4856
4857 /* skb_copy() does not copy the hw timestamps, so copy it
4858 * explicitly
4859 */
4860 shwt = skb_hwtstamps(rx->skb);
4861 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
4862 }
4863
4864 if (unlikely(rx->sta && rx->sta->sta.mlo)) {
4865 /* translate to MLD addresses */
4866 if (ether_addr_equal(link->conf->addr, hdr->addr1))
4867 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
4868 if (ether_addr_equal(link_sta->addr, hdr->addr2))
4869 ether_addr_copy(hdr->addr2, rx->sta->addr);
4870 /* translate A3 only if it's the BSSID */
4871 if (!ieee80211_has_tods(hdr->frame_control) &&
4872 !ieee80211_has_fromds(hdr->frame_control)) {
4873 if (ether_addr_equal(link_sta->addr, hdr->addr3))
4874 ether_addr_copy(hdr->addr3, rx->sta->addr);
4875 else if (ether_addr_equal(link->conf->addr, hdr->addr3))
4876 ether_addr_copy(hdr->addr3, rx->sdata->vif.addr);
4877 }
4878 /* not needed for A4 since it can only carry the SA */
4879 }
4880
4881 ieee80211_invoke_rx_handlers(rx);
4882 return true;
4883 }
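/*
 * Illustration of the MLD address translation above, assuming an MLO
 * station with link address L and MLD address M, received on a link whose
 * own address is A while the interface MLD address is V:
 *
 *	A1: A -> V	A2: L -> M	A3: translated the same way, but only
 *					when it carries the BSSID (neither
 *					ToDS nor FromDS set)
 */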
4884
4885 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
4886 struct ieee80211_sta *pubsta,
4887 struct sk_buff *skb,
4888 struct list_head *list)
4889 {
4890 struct ieee80211_local *local = hw_to_local(hw);
4891 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4892 struct ieee80211_fast_rx *fast_rx;
4893 struct ieee80211_rx_data rx;
4894 int link_id = -1;
4895
4896 memset(&rx, 0, sizeof(rx));
4897 rx.skb = skb;
4898 rx.local = local;
4899 rx.list = list;
4900 rx.link_id = -1;
4901
4902 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4903
4904 	/* drop frame if too short for an ethernet header */
4905 if (skb->len < sizeof(struct ethhdr))
4906 goto drop;
4907
4908 if (!pubsta)
4909 goto drop;
4910
4911 if (status->link_valid)
4912 link_id = status->link_id;
4913
4914 /*
4915 * TODO: Should the frame be dropped if the right link_id is not
4916 	 * available? Or maybe it is fine in the current form to proceed with
4917 	 * the frame processing because, with the frame being in 802.3 format,
4918 	 * link_id is used only for stats purposes and updating the stats on
4919 * the deflink is fine?
4920 */
4921 if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
4922 goto drop;
4923
4924 fast_rx = rcu_dereference(rx.sta->fast_rx);
4925 if (!fast_rx)
4926 goto drop;
4927
4928 ieee80211_rx_8023(&rx, fast_rx, skb->len);
4929 return;
4930
4931 drop:
4932 dev_kfree_skb(skb);
4933 }
4934
4935 static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
4936 struct sk_buff *skb, bool consume)
4937 {
4938 struct link_sta_info *link_sta;
4939 struct ieee80211_hdr *hdr = (void *)skb->data;
4940 struct sta_info *sta;
4941 int link_id = -1;
4942
4943 /*
4944 	 * Look up the link station first, in case it
4945 	 * happens to have a link address that is
4946 	 * identical to the MLD address; that way we'll
4947 	 * have the link information if needed.
4948 */
4949 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
4950 if (link_sta) {
4951 sta = link_sta->sta;
4952 link_id = link_sta->link_id;
4953 } else {
4954 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4955
4956 sta = sta_info_get_bss(rx->sdata, hdr->addr2);
4957 if (status->link_valid)
4958 link_id = status->link_id;
4959 }
4960
4961 if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
4962 return false;
4963
4964 return ieee80211_prepare_and_rx_handle(rx, skb, consume);
4965 }
4966
4967 /*
4968 * This is the actual Rx frames handler. As it belongs to the Rx path, it
4969 * must be called with rcu_read_lock protection.
4970 */
4971 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4972 struct ieee80211_sta *pubsta,
4973 struct sk_buff *skb,
4974 struct list_head *list)
4975 {
4976 struct ieee80211_local *local = hw_to_local(hw);
4977 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4978 struct ieee80211_sub_if_data *sdata;
4979 struct ieee80211_hdr *hdr;
4980 __le16 fc;
4981 struct ieee80211_rx_data rx;
4982 struct ieee80211_sub_if_data *prev;
4983 struct rhlist_head *tmp;
4984 int err = 0;
4985
4986 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4987 memset(&rx, 0, sizeof(rx));
4988 rx.skb = skb;
4989 rx.local = local;
4990 rx.list = list;
4991 rx.link_id = -1;
4992
4993 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4994 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4995
4996 if (ieee80211_is_mgmt(fc)) {
4997 /* drop frame if too short for header */
4998 if (skb->len < ieee80211_hdrlen(fc))
4999 err = -ENOBUFS;
5000 else
5001 err = skb_linearize(skb);
5002 } else {
5003 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
5004 }
5005
5006 if (err) {
5007 dev_kfree_skb(skb);
5008 return;
5009 }
5010
5011 hdr = (struct ieee80211_hdr *)skb->data;
5012 ieee80211_parse_qos(&rx);
5013 ieee80211_verify_alignment(&rx);
5014
5015 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
5016 ieee80211_is_beacon(hdr->frame_control) ||
5017 ieee80211_is_s1g_beacon(hdr->frame_control)))
5018 ieee80211_scan_rx(local, skb);
5019
5020 if (ieee80211_is_data(fc)) {
5021 struct sta_info *sta, *prev_sta;
5022 int link_id = -1;
5023
5024 if (status->link_valid)
5025 link_id = status->link_id;
5026
5027 if (pubsta) {
5028 if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
5029 goto out;
5030
5031 /*
5032 			 * In an MLO connection, fetch the link_id using addr2
5033 			 * when the driver does not pass link_id in the status.
5034 			 * When the address translation has already been done by
5035 			 * the driver/hw, the valid link_id must be passed in
5036 			 * the status.
5037 */
5038
5039 if (!status->link_valid && pubsta->mlo) {
5040 struct ieee80211_hdr *hdr = (void *)skb->data;
5041 struct link_sta_info *link_sta;
5042
5043 link_sta = link_sta_info_get_bss(rx.sdata,
5044 hdr->addr2);
5045 if (!link_sta)
5046 goto out;
5047
5048 ieee80211_rx_data_set_link(&rx, link_sta->link_id);
5049 }
5050
5051 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5052 return;
5053 goto out;
5054 }
5055
5056 prev_sta = NULL;
5057
5058 for_each_sta_info(local, hdr->addr2, sta, tmp) {
5059 if (!prev_sta) {
5060 prev_sta = sta;
5061 continue;
5062 }
5063
5064 rx.sdata = prev_sta->sdata;
5065 if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
5066 link_id))
5067 goto out;
5068
5069 if (!status->link_valid && prev_sta->sta.mlo)
5070 continue;
5071
5072 ieee80211_prepare_and_rx_handle(&rx, skb, false);
5073
5074 prev_sta = sta;
5075 }
5076
5077 if (prev_sta) {
5078 rx.sdata = prev_sta->sdata;
5079 if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
5080 link_id))
5081 goto out;
5082
5083 if (!status->link_valid && prev_sta->sta.mlo)
5084 goto out;
5085
5086 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5087 return;
5088 goto out;
5089 }
5090 }
5091
5092 prev = NULL;
5093
5094 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
5095 if (!ieee80211_sdata_running(sdata))
5096 continue;
5097
5098 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
5099 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
5100 continue;
5101
5102 /*
5103 * frame is destined for this interface, but if it's
5104 * not also for the previous one we handle that after
5105 * the loop to avoid copying the SKB once too much
5106 */
5107
5108 if (!prev) {
5109 prev = sdata;
5110 continue;
5111 }
5112
5113 rx.sdata = prev;
5114 ieee80211_rx_for_interface(&rx, skb, false);
5115
5116 prev = sdata;
5117 }
5118
5119 if (prev) {
5120 rx.sdata = prev;
5121
5122 if (ieee80211_rx_for_interface(&rx, skb, true))
5123 return;
5124 }
5125
5126 out:
5127 dev_kfree_skb(skb);
5128 }
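/*
 * Note the consume/copy strategy above: every matching station or interface
 * except the last one is handled with consume == false, which makes
 * ieee80211_prepare_and_rx_handle() work on an skb_copy(), and only the
 * final candidate gets to consume the original skb.
 */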
5129
5130 /*
5131 * This is the receive path handler. It is called by a low level driver when an
5132 * 802.11 MPDU is received from the hardware.
5133 */
5134 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
5135 struct sk_buff *skb, struct list_head *list)
5136 {
5137 struct ieee80211_local *local = hw_to_local(hw);
5138 struct ieee80211_rate *rate = NULL;
5139 struct ieee80211_supported_band *sband;
5140 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5141 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
5142
5143 WARN_ON_ONCE(softirq_count() == 0);
5144
5145 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
5146 goto drop;
5147
5148 sband = local->hw.wiphy->bands[status->band];
5149 if (WARN_ON(!sband))
5150 goto drop;
5151
5152 /*
5153 * If we're suspending, it is possible although not too likely
5154 * that we'd be receiving frames after having already partially
5155 * quiesced the stack. We can't process such frames then since
5156 * that might, for example, cause stations to be added or other
5157 * driver callbacks be invoked.
5158 */
5159 if (unlikely(local->quiescing || local->suspended))
5160 goto drop;
5161
5162 /* We might be during a HW reconfig, prevent Rx for the same reason */
5163 if (unlikely(local->in_reconfig))
5164 goto drop;
5165
5166 /*
5167 * The same happens when we're not even started,
5168 * but that's worth a warning.
5169 */
5170 if (WARN_ON(!local->started))
5171 goto drop;
5172
5173 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
5174 /*
5175 * Validate the rate, unless a PLCP error means that
5176 * we probably can't have a valid rate here anyway.
5177 */
5178
5179 switch (status->encoding) {
5180 case RX_ENC_HT:
5181 /*
5182 * rate_idx is MCS index, which can be [0-76]
5183 * as documented on:
5184 *
5185 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
5186 *
5187 * Anything else would be some sort of driver or
5188 * hardware error. The driver should catch hardware
5189 * errors.
5190 */
5191 if (WARN(status->rate_idx > 76,
5192 "Rate marked as an HT rate but passed "
5193 "status->rate_idx is not "
5194 "an MCS index [0-76]: %d (0x%02x)\n",
5195 status->rate_idx,
5196 status->rate_idx))
5197 goto drop;
5198 break;
5199 case RX_ENC_VHT:
5200 if (WARN_ONCE(status->rate_idx > 11 ||
5201 !status->nss ||
5202 status->nss > 8,
5203 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
5204 status->rate_idx, status->nss))
5205 goto drop;
5206 break;
5207 case RX_ENC_HE:
5208 if (WARN_ONCE(status->rate_idx > 11 ||
5209 !status->nss ||
5210 status->nss > 8,
5211 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
5212 status->rate_idx, status->nss))
5213 goto drop;
5214 break;
5215 default:
5216 WARN_ON_ONCE(1);
5217 fallthrough;
5218 case RX_ENC_LEGACY:
5219 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
5220 goto drop;
5221 rate = &sband->bitrates[status->rate_idx];
5222 }
5223 }
5224
5225 if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED))
5226 goto drop;
5227
5228 status->rx_flags = 0;
5229
5230 kcov_remote_start_common(skb_get_kcov_handle(skb));
5231
5232 /*
5233 	 * Frames with a failed FCS/PLCP checksum are not returned;
5234 	 * all other frames are returned without the radiotap header
5235 	 * if one was previously present.
5236 * Also, frames with less than 16 bytes are dropped.
5237 */
5238 if (!(status->flag & RX_FLAG_8023))
5239 skb = ieee80211_rx_monitor(local, skb, rate);
5240 if (skb) {
5241 if ((status->flag & RX_FLAG_8023) ||
5242 ieee80211_is_data_present(hdr->frame_control))
5243 ieee80211_tpt_led_trig_rx(local, skb->len);
5244
5245 if (status->flag & RX_FLAG_8023)
5246 __ieee80211_rx_handle_8023(hw, pubsta, skb, list);
5247 else
5248 __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
5249 }
5250
5251 kcov_remote_stop();
5252 return;
5253 drop:
5254 kfree_skb(skb);
5255 }
5256 EXPORT_SYMBOL(ieee80211_rx_list);
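/*
 * Hypothetical driver-side usage sketch (not something mac80211 mandates):
 * a driver batching frames per NAPI poll might do roughly
 *
 *	LIST_HEAD(list);
 *
 *	rcu_read_lock();
 *	// for each received MPDU:
 *	ieee80211_rx_list(hw, NULL, skb, &list);
 *	rcu_read_unlock();
 *	netif_receive_skb_list(&list);
 *
 * which is essentially what ieee80211_rx_napi() below does for a single skb
 * when no napi struct is passed.
 */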
5257
5258 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
5259 struct sk_buff *skb, struct napi_struct *napi)
5260 {
5261 struct sk_buff *tmp;
5262 LIST_HEAD(list);
5263
5264
5265 /*
5266 * key references and virtual interfaces are protected using RCU
5267 * and this requires that we are in a read-side RCU section during
5268 * receive processing
5269 */
5270 rcu_read_lock();
5271 ieee80211_rx_list(hw, pubsta, skb, &list);
5272 rcu_read_unlock();
5273
5274 if (!napi) {
5275 netif_receive_skb_list(&list);
5276 return;
5277 }
5278
5279 list_for_each_entry_safe(skb, tmp, &list, list) {
5280 skb_list_del_init(skb);
5281 napi_gro_receive(napi, skb);
5282 }
5283 }
5284 EXPORT_SYMBOL(ieee80211_rx_napi);
5285
5286 /* This is a version of the rx handler that can be called from hard irq
5287 * context. Post the skb on the queue and schedule the tasklet. */
5288 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
5289 {
5290 struct ieee80211_local *local = hw_to_local(hw);
5291
5292 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
5293
5294 skb->pkt_type = IEEE80211_RX_MSG;
5295 skb_queue_tail(&local->skb_queue, skb);
5296 tasklet_schedule(&local->tasklet);
5297 }
5298 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
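/*
 * To summarize the driver-facing RX entry points defined in this file:
 *
 *	ieee80211_rx_list()	BH/NAPI context, caller holds the RCU read
 *				lock, resulting skbs are collected on a list
 *	ieee80211_rx_napi()	BH/NAPI context, wraps the above and delivers
 *				via napi_gro_receive() or
 *				netif_receive_skb_list()
 *	ieee80211_rx_irqsafe()	safe from hard interrupt context, only queues
 *				the skb and schedules the RX tasklet
 */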
5299