1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
9 #define CHAN2G(_idx, _freq) { \
10 .band = NL80211_BAND_2GHZ, \
11 .center_freq = (_freq), \
12 .hw_value = (_idx), \
13 .max_power = 30, \
14 }
15
16 #define CHAN5G(_idx, _freq) { \
17 .band = NL80211_BAND_5GHZ, \
18 .center_freq = (_freq), \
19 .hw_value = (_idx), \
20 .max_power = 30, \
21 }
22
23 #define CHAN6G(_idx, _freq) { \
24 .band = NL80211_BAND_6GHZ, \
25 .center_freq = (_freq), \
26 .hw_value = (_idx), \
27 .max_power = 30, \
28 }
29
30 static const struct ieee80211_channel mt76_channels_2ghz[] = {
31 CHAN2G(1, 2412),
32 CHAN2G(2, 2417),
33 CHAN2G(3, 2422),
34 CHAN2G(4, 2427),
35 CHAN2G(5, 2432),
36 CHAN2G(6, 2437),
37 CHAN2G(7, 2442),
38 CHAN2G(8, 2447),
39 CHAN2G(9, 2452),
40 CHAN2G(10, 2457),
41 CHAN2G(11, 2462),
42 CHAN2G(12, 2467),
43 CHAN2G(13, 2472),
44 CHAN2G(14, 2484),
45 };
46
47 static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 CHAN5G(36, 5180),
49 CHAN5G(40, 5200),
50 CHAN5G(44, 5220),
51 CHAN5G(48, 5240),
52
53 CHAN5G(52, 5260),
54 CHAN5G(56, 5280),
55 CHAN5G(60, 5300),
56 CHAN5G(64, 5320),
57
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
61 CHAN5G(112, 5560),
62 CHAN5G(116, 5580),
63 CHAN5G(120, 5600),
64 CHAN5G(124, 5620),
65 CHAN5G(128, 5640),
66 CHAN5G(132, 5660),
67 CHAN5G(136, 5680),
68 CHAN5G(140, 5700),
69 CHAN5G(144, 5720),
70
71 CHAN5G(149, 5745),
72 CHAN5G(153, 5765),
73 CHAN5G(157, 5785),
74 CHAN5G(161, 5805),
75 CHAN5G(165, 5825),
76 CHAN5G(169, 5845),
77 CHAN5G(173, 5865),
78 CHAN5G(177, 5885),
79 };
80
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
82 /* UNII-5 */
83 CHAN6G(1, 5955),
84 CHAN6G(5, 5975),
85 CHAN6G(9, 5995),
86 CHAN6G(13, 6015),
87 CHAN6G(17, 6035),
88 CHAN6G(21, 6055),
89 CHAN6G(25, 6075),
90 CHAN6G(29, 6095),
91 CHAN6G(33, 6115),
92 CHAN6G(37, 6135),
93 CHAN6G(41, 6155),
94 CHAN6G(45, 6175),
95 CHAN6G(49, 6195),
96 CHAN6G(53, 6215),
97 CHAN6G(57, 6235),
98 CHAN6G(61, 6255),
99 CHAN6G(65, 6275),
100 CHAN6G(69, 6295),
101 CHAN6G(73, 6315),
102 CHAN6G(77, 6335),
103 CHAN6G(81, 6355),
104 CHAN6G(85, 6375),
105 CHAN6G(89, 6395),
106 CHAN6G(93, 6415),
107 /* UNII-6 */
108 CHAN6G(97, 6435),
109 CHAN6G(101, 6455),
110 CHAN6G(105, 6475),
111 CHAN6G(109, 6495),
112 CHAN6G(113, 6515),
113 CHAN6G(117, 6535),
114 /* UNII-7 */
115 CHAN6G(121, 6555),
116 CHAN6G(125, 6575),
117 CHAN6G(129, 6595),
118 CHAN6G(133, 6615),
119 CHAN6G(137, 6635),
120 CHAN6G(141, 6655),
121 CHAN6G(145, 6675),
122 CHAN6G(149, 6695),
123 CHAN6G(153, 6715),
124 CHAN6G(157, 6735),
125 CHAN6G(161, 6755),
126 CHAN6G(165, 6775),
127 CHAN6G(169, 6795),
128 CHAN6G(173, 6815),
129 CHAN6G(177, 6835),
130 CHAN6G(181, 6855),
131 CHAN6G(185, 6875),
132 /* UNII-8 */
133 CHAN6G(189, 6895),
134 CHAN6G(193, 6915),
135 CHAN6G(197, 6935),
136 CHAN6G(201, 6955),
137 CHAN6G(205, 6975),
138 CHAN6G(209, 6995),
139 CHAN6G(213, 7015),
140 CHAN6G(217, 7035),
141 CHAN6G(221, 7055),
142 CHAN6G(225, 7075),
143 CHAN6G(229, 7095),
144 CHAN6G(233, 7115),
145 };
146
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
148 { .throughput = 0 * 1024, .blink_time = 334 },
149 { .throughput = 1 * 1024, .blink_time = 260 },
150 { .throughput = 5 * 1024, .blink_time = 220 },
151 { .throughput = 10 * 1024, .blink_time = 190 },
152 { .throughput = 20 * 1024, .blink_time = 170 },
153 { .throughput = 50 * 1024, .blink_time = 150 },
154 { .throughput = 70 * 1024, .blink_time = 130 },
155 { .throughput = 100 * 1024, .blink_time = 110 },
156 { .throughput = 200 * 1024, .blink_time = 80 },
157 { .throughput = 300 * 1024, .blink_time = 50 },
158 };
159
160 struct ieee80211_rate mt76_rates[] = {
161 CCK_RATE(0, 10),
162 CCK_RATE(1, 20),
163 CCK_RATE(2, 55),
164 CCK_RATE(3, 110),
165 OFDM_RATE(11, 60),
166 OFDM_RATE(15, 90),
167 OFDM_RATE(10, 120),
168 OFDM_RATE(14, 180),
169 OFDM_RATE(9, 240),
170 OFDM_RATE(13, 360),
171 OFDM_RATE(8, 480),
172 OFDM_RATE(12, 540),
173 };
174 EXPORT_SYMBOL_GPL(mt76_rates);
175
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
177 { .start_freq = 2402, .end_freq = 2494, },
178 { .start_freq = 5150, .end_freq = 5350, },
179 { .start_freq = 5350, .end_freq = 5470, },
180 { .start_freq = 5470, .end_freq = 5725, },
181 { .start_freq = 5725, .end_freq = 5950, },
182 { .start_freq = 5945, .end_freq = 6165, },
183 { .start_freq = 6165, .end_freq = 6405, },
184 { .start_freq = 6405, .end_freq = 6525, },
185 { .start_freq = 6525, .end_freq = 6705, },
186 { .start_freq = 6705, .end_freq = 6865, },
187 { .start_freq = 6865, .end_freq = 7125, },
188 };
189
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 .type = NL80211_SAR_TYPE_POWER,
192 .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 .freq_ranges = &mt76_sar_freq_ranges[0],
194 };
195
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200
201 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
202 return 0;
203
204 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
205 wiphy_name(hw->wiphy));
206
207 phy->leds.cdev.name = phy->leds.name;
208 phy->leds.cdev.default_trigger =
209 ieee80211_create_tpt_led_trigger(hw,
210 IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 mt76_tpt_blink,
212 ARRAY_SIZE(mt76_tpt_blink));
213
214 if (phy == &dev->phy) {
215 struct device_node *np = dev->dev->of_node;
216
217 np = of_get_child_by_name(np, "led");
218 if (np) {
219 int led_pin;
220
221 if (!of_property_read_u32(np, "led-sources", &led_pin))
222 phy->leds.pin = led_pin;
223 phy->leds.al = of_property_read_bool(np,
224 "led-active-low");
225 of_node_put(np);
226 }
227 }
228
229 return led_classdev_register(dev->dev, &phy->leds.cdev);
230 }
231
232 static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 return;
236
237 led_classdev_unregister(&phy->leds.cdev);
238 }
239
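/*
 * Derive HT/VHT spatial stream capabilities from the antenna mask:
 * TX STBC is advertised only with more than one chain, and the HT/VHT
 * MCS maps are limited to hweight8(antenna_mask) streams.
 */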
240 static void mt76_init_stream_cap(struct mt76_phy *phy,
241 struct ieee80211_supported_band *sband,
242 bool vht)
243 {
244 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
245 int i, nstream = hweight8(phy->antenna_mask);
246 struct ieee80211_sta_vht_cap *vht_cap;
247 u16 mcs_map = 0;
248
249 if (nstream > 1)
250 ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
251 else
252 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
253
254 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
255 ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
256
257 if (!vht)
258 return;
259
260 vht_cap = &sband->vht_cap;
261 if (nstream > 1)
262 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
263 else
264 vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
265 vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
266 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
267
268 for (i = 0; i < 8; i++) {
269 if (i < nstream)
270 mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
271 else
272 mcs_map |=
273 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
274 }
275 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
276 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
277 if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
278 vht_cap->vht_mcs.tx_highest |=
279 cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
280 }
281
282 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
283 {
284 if (phy->cap.has_2ghz)
285 mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
286 if (phy->cap.has_5ghz)
287 mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
288 if (phy->cap.has_6ghz)
289 mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
290 }
291 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292
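/*
 * Common sband setup: duplicate the constant channel list, allocate
 * per-channel state and optionally fill in default HT/VHT capabilities.
 */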
293 static int
294 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
295 const struct ieee80211_channel *chan, int n_chan,
296 struct ieee80211_rate *rates, int n_rates,
297 bool ht, bool vht)
298 {
299 struct ieee80211_supported_band *sband = &msband->sband;
300 struct ieee80211_sta_vht_cap *vht_cap;
301 struct ieee80211_sta_ht_cap *ht_cap;
302 struct mt76_dev *dev = phy->dev;
303 void *chanlist;
304 int size;
305
306 size = n_chan * sizeof(*chan);
307 chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
308 if (!chanlist)
309 return -ENOMEM;
310
311 msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
312 GFP_KERNEL);
313 if (!msband->chan)
314 return -ENOMEM;
315
316 sband->channels = chanlist;
317 sband->n_channels = n_chan;
318 sband->bitrates = rates;
319 sband->n_bitrates = n_rates;
320
321 if (!ht)
322 return 0;
323
324 ht_cap = &sband->ht_cap;
325 ht_cap->ht_supported = true;
326 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
327 IEEE80211_HT_CAP_GRN_FLD |
328 IEEE80211_HT_CAP_SGI_20 |
329 IEEE80211_HT_CAP_SGI_40 |
330 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
331
332 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
333 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
334
335 mt76_init_stream_cap(phy, sband, vht);
336
337 if (!vht)
338 return 0;
339
340 vht_cap = &sband->vht_cap;
341 vht_cap->vht_supported = true;
342 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
343 IEEE80211_VHT_CAP_RXSTBC_1 |
344 IEEE80211_VHT_CAP_SHORT_GI_80 |
345 (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
346
347 return 0;
348 }
349
350 static int
351 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 int n_rates)
353 {
354 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355
356 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 ARRAY_SIZE(mt76_channels_2ghz), rates,
358 n_rates, true, false);
359 }
360
361 static int
362 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates, bool vht)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 ARRAY_SIZE(mt76_channels_5ghz), rates,
369 n_rates, true, vht);
370 }
371
372 static int
373 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 ARRAY_SIZE(mt76_channels_6ghz), rates,
380 n_rates, false, false);
381 }
382
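/*
 * If every channel of a band ended up disabled (e.g. by OF frequency
 * limits), drop the band from the wiphy; otherwise use its first
 * channel as the initial channel and channel state.
 */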
383 static void
384 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 enum nl80211_band band)
386 {
387 struct ieee80211_supported_band *sband = &msband->sband;
388 bool found = false;
389 int i;
390
391 if (!sband)
392 return;
393
394 for (i = 0; i < sband->n_channels; i++) {
395 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 continue;
397
398 found = true;
399 break;
400 }
401
402 if (found) {
403 phy->chandef.chan = &sband->channels[0];
404 phy->chan_state = &msband->chan[0];
405 return;
406 }
407
408 sband->n_channels = 0;
409 phy->hw->wiphy->bands[band] = NULL;
410 }
411
412 static int
413 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
414 {
415 struct mt76_dev *dev = phy->dev;
416 struct wiphy *wiphy = hw->wiphy;
417
418 SET_IEEE80211_DEV(hw, dev->dev);
419 SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
420
421 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
422 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
423 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
424 WIPHY_FLAG_SUPPORTS_TDLS |
425 WIPHY_FLAG_AP_UAPSD;
426
427 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
428 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
429 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
430
431 wiphy->available_antennas_tx = phy->antenna_mask;
432 wiphy->available_antennas_rx = phy->antenna_mask;
433
434 wiphy->sar_capa = &mt76_sar_capa;
435 phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
436 sizeof(struct mt76_freq_range_power),
437 GFP_KERNEL);
438 if (!phy->frp)
439 return -ENOMEM;
440
441 hw->txq_data_size = sizeof(struct mt76_txq);
442 hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
443
444 if (!hw->max_tx_fragments)
445 hw->max_tx_fragments = 16;
446
447 ieee80211_hw_set(hw, SIGNAL_DBM);
448 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
449 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
450 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
451 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
452 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
453 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
454
455 if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
456 ieee80211_hw_set(hw, TX_AMSDU);
457 ieee80211_hw_set(hw, TX_FRAG_LIST);
458 }
459
460 ieee80211_hw_set(hw, MFP_CAPABLE);
461 ieee80211_hw_set(hw, AP_LINK_PS);
462 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
463
464 return 0;
465 }
466
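/*
 * Allocate a secondary phy. The driver private area (size bytes)
 * follows the 8-byte aligned struct mt76_phy inside hw->priv.
 */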
467 struct mt76_phy *
468 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
469 const struct ieee80211_ops *ops, u8 band_idx)
470 {
471 struct ieee80211_hw *hw;
472 unsigned int phy_size;
473 struct mt76_phy *phy;
474
475 phy_size = ALIGN(sizeof(*phy), 8);
476 hw = ieee80211_alloc_hw(size + phy_size, ops);
477 if (!hw)
478 return NULL;
479
480 phy = hw->priv;
481 phy->dev = dev;
482 phy->hw = hw;
483 phy->priv = hw->priv + phy_size;
484 phy->band_idx = band_idx;
485
486 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
487 hw->wiphy->interface_modes =
488 BIT(NL80211_IFTYPE_STATION) |
489 BIT(NL80211_IFTYPE_AP) |
490 #ifdef CONFIG_MAC80211_MESH
491 BIT(NL80211_IFTYPE_MESH_POINT) |
492 #endif
493 BIT(NL80211_IFTYPE_P2P_CLIENT) |
494 BIT(NL80211_IFTYPE_P2P_GO) |
495 BIT(NL80211_IFTYPE_ADHOC);
496
497 return phy;
498 }
499 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
500
501 int mt76_register_phy(struct mt76_phy *phy, bool vht,
502 struct ieee80211_rate *rates, int n_rates)
503 {
504 int ret;
505
506 ret = mt76_phy_init(phy, phy->hw);
507 if (ret)
508 return ret;
509
510 if (phy->cap.has_2ghz) {
511 ret = mt76_init_sband_2g(phy, rates, n_rates);
512 if (ret)
513 return ret;
514 }
515
516 if (phy->cap.has_5ghz) {
517 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
518 if (ret)
519 return ret;
520 }
521
522 if (phy->cap.has_6ghz) {
523 ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
524 if (ret)
525 return ret;
526 }
527
528 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
529 ret = mt76_led_init(phy);
530 if (ret)
531 return ret;
532 }
533
534 wiphy_read_of_freq_limits(phy->hw->wiphy);
535 mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
536 mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
537 mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
538
539 ret = ieee80211_register_hw(phy->hw);
540 if (ret)
541 return ret;
542
543 set_bit(MT76_STATE_REGISTERED, &phy->state);
544 phy->dev->phys[phy->band_idx] = phy;
545
546 return 0;
547 }
548 EXPORT_SYMBOL_GPL(mt76_register_phy);
549
550 void mt76_unregister_phy(struct mt76_phy *phy)
551 {
552 struct mt76_dev *dev = phy->dev;
553
554 if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
555 return;
556
557 if (IS_ENABLED(CONFIG_MT76_LEDS))
558 mt76_led_cleanup(phy);
559 mt76_tx_status_check(dev, true);
560 ieee80211_unregister_hw(phy->hw);
561 dev->phys[phy->band_idx] = NULL;
562 }
563 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
564
565 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
566 {
567 struct page_pool_params pp_params = {
568 .order = 0,
569 .flags = PP_FLAG_PAGE_FRAG,
570 .nid = NUMA_NO_NODE,
571 .dev = dev->dma_dev,
572 };
573 int idx = q - dev->q_rx;
574
575 switch (idx) {
576 case MT_RXQ_MAIN:
577 case MT_RXQ_BAND1:
578 case MT_RXQ_BAND2:
579 pp_params.pool_size = 256;
580 break;
581 default:
582 pp_params.pool_size = 16;
583 break;
584 }
585
586 if (mt76_is_mmio(dev)) {
587 /* rely on page_pool for DMA mapping */
588 pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
589 pp_params.dma_dir = DMA_FROM_DEVICE;
590 pp_params.max_len = PAGE_SIZE;
591 pp_params.offset = 0;
592 }
593
594 q->page_pool = page_pool_create(&pp_params);
595 if (IS_ERR(q->page_pool)) {
596 int err = PTR_ERR(q->page_pool);
597
598 q->page_pool = NULL;
599 return err;
600 }
601
602 return 0;
603 }
604 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
605
606 struct mt76_dev *
607 mt76_alloc_device(struct device *pdev, unsigned int size,
608 const struct ieee80211_ops *ops,
609 const struct mt76_driver_ops *drv_ops)
610 {
611 struct ieee80211_hw *hw;
612 struct mt76_phy *phy;
613 struct mt76_dev *dev;
614 int i;
615
616 hw = ieee80211_alloc_hw(size, ops);
617 if (!hw)
618 return NULL;
619
620 dev = hw->priv;
621 dev->hw = hw;
622 dev->dev = pdev;
623 dev->drv = drv_ops;
624 dev->dma_dev = pdev;
625
626 phy = &dev->phy;
627 phy->dev = dev;
628 phy->hw = hw;
629 phy->band_idx = MT_BAND0;
630 dev->phys[phy->band_idx] = phy;
631
632 spin_lock_init(&dev->rx_lock);
633 spin_lock_init(&dev->lock);
634 spin_lock_init(&dev->cc_lock);
635 spin_lock_init(&dev->status_lock);
636 spin_lock_init(&dev->wed_lock);
637 mutex_init(&dev->mutex);
638 init_waitqueue_head(&dev->tx_wait);
639
640 skb_queue_head_init(&dev->mcu.res_q);
641 init_waitqueue_head(&dev->mcu.wait);
642 mutex_init(&dev->mcu.mutex);
643 dev->tx_worker.fn = mt76_tx_worker;
644
645 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
646 hw->wiphy->interface_modes =
647 BIT(NL80211_IFTYPE_STATION) |
648 BIT(NL80211_IFTYPE_AP) |
649 #ifdef CONFIG_MAC80211_MESH
650 BIT(NL80211_IFTYPE_MESH_POINT) |
651 #endif
652 BIT(NL80211_IFTYPE_P2P_CLIENT) |
653 BIT(NL80211_IFTYPE_P2P_GO) |
654 BIT(NL80211_IFTYPE_ADHOC);
655
656 spin_lock_init(&dev->token_lock);
657 idr_init(&dev->token);
658
659 spin_lock_init(&dev->rx_token_lock);
660 idr_init(&dev->rx_token);
661
662 INIT_LIST_HEAD(&dev->wcid_list);
663 INIT_LIST_HEAD(&dev->sta_poll_list);
664 spin_lock_init(&dev->sta_poll_lock);
665
666 INIT_LIST_HEAD(&dev->txwi_cache);
667 INIT_LIST_HEAD(&dev->rxwi_cache);
668 dev->token_size = dev->drv->token_size;
669
670 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
671 skb_queue_head_init(&dev->rx_skb[i]);
672
673 dev->wq = alloc_ordered_workqueue("mt76", 0);
674 if (!dev->wq) {
675 ieee80211_free_hw(hw);
676 return NULL;
677 }
678
679 return dev;
680 }
681 EXPORT_SYMBOL_GPL(mt76_alloc_device);
682
683 int mt76_register_device(struct mt76_dev *dev, bool vht,
684 struct ieee80211_rate *rates, int n_rates)
685 {
686 struct ieee80211_hw *hw = dev->hw;
687 struct mt76_phy *phy = &dev->phy;
688 int ret;
689
690 dev_set_drvdata(dev->dev, dev);
691 ret = mt76_phy_init(phy, hw);
692 if (ret)
693 return ret;
694
695 if (phy->cap.has_2ghz) {
696 ret = mt76_init_sband_2g(phy, rates, n_rates);
697 if (ret)
698 return ret;
699 }
700
701 if (phy->cap.has_5ghz) {
702 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
703 if (ret)
704 return ret;
705 }
706
707 if (phy->cap.has_6ghz) {
708 ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
709 if (ret)
710 return ret;
711 }
712
713 wiphy_read_of_freq_limits(hw->wiphy);
714 mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
715 mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
716 mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
717
718 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
719 ret = mt76_led_init(phy);
720 if (ret)
721 return ret;
722 }
723
724 ret = ieee80211_register_hw(hw);
725 if (ret)
726 return ret;
727
728 WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
729 set_bit(MT76_STATE_REGISTERED, &phy->state);
730 sched_set_fifo_low(dev->tx_worker.task);
731
732 return 0;
733 }
734 EXPORT_SYMBOL_GPL(mt76_register_device);
735
736 void mt76_unregister_device(struct mt76_dev *dev)
737 {
738 struct ieee80211_hw *hw = dev->hw;
739
740 if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
741 return;
742
743 if (IS_ENABLED(CONFIG_MT76_LEDS))
744 mt76_led_cleanup(&dev->phy);
745 mt76_tx_status_check(dev, true);
746 ieee80211_unregister_hw(hw);
747 }
748 EXPORT_SYMBOL_GPL(mt76_unregister_device);
749
750 void mt76_free_device(struct mt76_dev *dev)
751 {
752 mt76_worker_teardown(&dev->tx_worker);
753 if (dev->wq) {
754 destroy_workqueue(dev->wq);
755 dev->wq = NULL;
756 }
757 ieee80211_free_hw(dev->hw);
758 }
759 EXPORT_SYMBOL_GPL(mt76_free_device);
760
761 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
762 {
763 struct sk_buff *skb = phy->rx_amsdu[q].head;
764 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
765 struct mt76_dev *dev = phy->dev;
766
767 phy->rx_amsdu[q].head = NULL;
768 phy->rx_amsdu[q].tail = NULL;
769
770 /*
771 * Validate whether the A-MSDU has a proper first subframe.
772 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
773 * A-MSDU flag of the QoS header gets flipped. In such cases, the first
774 * subframe carries an LLC/SNAP header in place of the destination
775 * address.
776 */
777 if (skb_shinfo(skb)->frag_list) {
778 int offset = 0;
779
780 if (!(status->flag & RX_FLAG_8023)) {
781 offset = ieee80211_get_hdrlen_from_skb(skb);
782
783 if ((status->flag &
784 (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
785 RX_FLAG_DECRYPTED)
786 offset += 8;
787 }
788
789 if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
790 dev_kfree_skb(skb);
791 return;
792 }
793 }
794 __skb_queue_tail(&dev->rx_skb[q], skb);
795 }
796
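/*
 * Collect A-MSDU subframes of one burst on the frag_list of the first
 * subframe; flush the pending chain when a new sequence number or the
 * last subframe is seen.
 */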
797 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
798 struct sk_buff *skb)
799 {
800 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
801
802 if (phy->rx_amsdu[q].head &&
803 (!status->amsdu || status->first_amsdu ||
804 status->seqno != phy->rx_amsdu[q].seqno))
805 mt76_rx_release_amsdu(phy, q);
806
807 if (!phy->rx_amsdu[q].head) {
808 phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
809 phy->rx_amsdu[q].seqno = status->seqno;
810 phy->rx_amsdu[q].head = skb;
811 } else {
812 *phy->rx_amsdu[q].tail = skb;
813 phy->rx_amsdu[q].tail = &skb->next;
814 }
815
816 if (!status->amsdu || status->last_amsdu)
817 mt76_rx_release_amsdu(phy, q);
818 }
819
820 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
821 {
822 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
823 struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
824
825 if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
826 dev_kfree_skb(skb);
827 return;
828 }
829
830 #ifdef CONFIG_NL80211_TESTMODE
831 if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
832 phy->test.rx_stats.packets[q]++;
833 if (status->flag & RX_FLAG_FAILED_FCS_CRC)
834 phy->test.rx_stats.fcs_error[q]++;
835 }
836 #endif
837
838 mt76_rx_release_burst(phy, q, skb);
839 }
840 EXPORT_SYMBOL_GPL(mt76_rx);
841
842 bool mt76_has_tx_pending(struct mt76_phy *phy)
843 {
844 struct mt76_queue *q;
845 int i;
846
847 for (i = 0; i < __MT_TXQ_MAX; i++) {
848 q = phy->q_tx[i];
849 if (q && q->queued)
850 return true;
851 }
852
853 return false;
854 }
855 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
856
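/*
 * Map a cfg80211 channel to its mt76 per-channel state by index into
 * the matching sband.
 */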
857 static struct mt76_channel_state *
858 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
859 {
860 struct mt76_sband *msband;
861 int idx;
862
863 if (c->band == NL80211_BAND_2GHZ)
864 msband = &phy->sband_2g;
865 else if (c->band == NL80211_BAND_6GHZ)
866 msband = &phy->sband_6g;
867 else
868 msband = &phy->sband_5g;
869
870 idx = c - &msband->sband.channels[0];
871 return &msband->chan[idx];
872 }
873
874 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
875 {
876 struct mt76_channel_state *state = phy->chan_state;
877
878 state->cc_active += ktime_to_us(ktime_sub(time,
879 phy->survey_time));
880 phy->survey_time = time;
881 }
882 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
883
884 void mt76_update_survey(struct mt76_phy *phy)
885 {
886 struct mt76_dev *dev = phy->dev;
887 ktime_t cur_time;
888
889 if (dev->drv->update_survey)
890 dev->drv->update_survey(phy);
891
892 cur_time = ktime_get_boottime();
893 mt76_update_survey_active_time(phy, cur_time);
894
895 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
896 struct mt76_channel_state *state = phy->chan_state;
897
898 spin_lock_bh(&dev->cc_lock);
899 state->cc_bss_rx += dev->cur_cc_bss_rx;
900 dev->cur_cc_bss_rx = 0;
901 spin_unlock_bh(&dev->cc_lock);
902 }
903 }
904 EXPORT_SYMBOL_GPL(mt76_update_survey);
905
906 void mt76_set_channel(struct mt76_phy *phy)
907 {
908 struct mt76_dev *dev = phy->dev;
909 struct ieee80211_hw *hw = phy->hw;
910 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
911 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
912 int timeout = HZ / 5;
913
914 wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
915 mt76_update_survey(phy);
916
917 if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
918 phy->chandef.width != chandef->width)
919 phy->dfs_state = MT_DFS_STATE_UNKNOWN;
920
921 phy->chandef = *chandef;
922 phy->chan_state = mt76_channel_state(phy, chandef->chan);
923
924 if (!offchannel)
925 phy->main_chan = chandef->chan;
926
927 if (chandef->chan != phy->main_chan)
928 memset(phy->chan_state, 0, sizeof(*phy->chan_state));
929 }
930 EXPORT_SYMBOL_GPL(mt76_set_channel);
931
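/*
 * Survey indices are laid out as 2 GHz channels first, then 5 GHz,
 * then 6 GHz; convert the global index to a per-band one before
 * looking up the channel state.
 */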
932 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
933 struct survey_info *survey)
934 {
935 struct mt76_phy *phy = hw->priv;
936 struct mt76_dev *dev = phy->dev;
937 struct mt76_sband *sband;
938 struct ieee80211_channel *chan;
939 struct mt76_channel_state *state;
940 int ret = 0;
941
942 mutex_lock(&dev->mutex);
943 if (idx == 0 && dev->drv->update_survey)
944 mt76_update_survey(phy);
945
946 if (idx >= phy->sband_2g.sband.n_channels +
947 phy->sband_5g.sband.n_channels) {
948 idx -= (phy->sband_2g.sband.n_channels +
949 phy->sband_5g.sband.n_channels);
950 sband = &phy->sband_6g;
951 } else if (idx >= phy->sband_2g.sband.n_channels) {
952 idx -= phy->sband_2g.sband.n_channels;
953 sband = &phy->sband_5g;
954 } else {
955 sband = &phy->sband_2g;
956 }
957
958 if (idx >= sband->sband.n_channels) {
959 ret = -ENOENT;
960 goto out;
961 }
962
963 chan = &sband->sband.channels[idx];
964 state = mt76_channel_state(phy, chan);
965
966 memset(survey, 0, sizeof(*survey));
967 survey->channel = chan;
968 survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
969 survey->filled |= dev->drv->survey_flags;
970 if (state->noise)
971 survey->filled |= SURVEY_INFO_NOISE_DBM;
972
973 if (chan == phy->main_chan) {
974 survey->filled |= SURVEY_INFO_IN_USE;
975
976 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
977 survey->filled |= SURVEY_INFO_TIME_BSS_RX;
978 }
979
980 survey->time_busy = div_u64(state->cc_busy, 1000);
981 survey->time_rx = div_u64(state->cc_rx, 1000);
982 survey->time = div_u64(state->cc_active, 1000);
983 survey->noise = state->noise;
984
985 spin_lock_bh(&dev->cc_lock);
986 survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
987 survey->time_tx = div_u64(state->cc_tx, 1000);
988 spin_unlock_bh(&dev->cc_lock);
989
990 out:
991 mutex_unlock(&dev->mutex);
992
993 return ret;
994 }
995 EXPORT_SYMBOL_GPL(mt76_get_survey);
996
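/*
 * Seed the per-TID receive PNs from mac80211 so that replay checks can
 * be done in mt76_check_ccmp_pn(); only CCMP keys are handled here.
 */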
997 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
998 struct ieee80211_key_conf *key)
999 {
1000 struct ieee80211_key_seq seq;
1001 int i;
1002
1003 wcid->rx_check_pn = false;
1004
1005 if (!key)
1006 return;
1007
1008 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1009 return;
1010
1011 wcid->rx_check_pn = true;
1012
1013 /* data frame */
1014 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1015 ieee80211_get_key_rx_seq(key, i, &seq);
1016 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1017 }
1018
1019 /* robust management frame */
1020 ieee80211_get_key_rx_seq(key, -1, &seq);
1021 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1022
1023 }
1024 EXPORT_SYMBOL(mt76_wcid_key_setup);
1025
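/*
 * Combine per-chain RSSI into a single signal value: start from the
 * strongest chain and add a small gain for each additional chain
 * within 6 dB of it (for example, two chains at -60 dBm combine to
 * roughly -57 dBm). Chains with invalid (positive) values are skipped.
 */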
1026 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1027 {
1028 int signal = -128;
1029 u8 chains;
1030
1031 for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1032 int cur, diff;
1033
1034 cur = *chain_signal;
1035 if (!(chains & BIT(0)) ||
1036 cur > 0)
1037 continue;
1038
1039 if (cur > signal)
1040 swap(cur, signal);
1041
1042 diff = signal - cur;
1043 if (diff == 0)
1044 signal += 3;
1045 else if (diff <= 2)
1046 signal += 2;
1047 else if (diff <= 6)
1048 signal += 1;
1049 }
1050
1051 return signal;
1052 }
1053 EXPORT_SYMBOL(mt76_rx_signal);
1054
1055 static void
1056 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1057 struct ieee80211_hw **hw,
1058 struct ieee80211_sta **sta)
1059 {
1060 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1061 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1062 struct mt76_rx_status mstat;
1063
1064 mstat = *((struct mt76_rx_status *)skb->cb);
1065 memset(status, 0, sizeof(*status));
1066
1067 status->flag = mstat.flag;
1068 status->freq = mstat.freq;
1069 status->enc_flags = mstat.enc_flags;
1070 status->encoding = mstat.encoding;
1071 status->bw = mstat.bw;
1072 if (status->encoding == RX_ENC_EHT) {
1073 status->eht.ru = mstat.eht.ru;
1074 status->eht.gi = mstat.eht.gi;
1075 } else {
1076 status->he_ru = mstat.he_ru;
1077 status->he_gi = mstat.he_gi;
1078 status->he_dcm = mstat.he_dcm;
1079 }
1080 status->rate_idx = mstat.rate_idx;
1081 status->nss = mstat.nss;
1082 status->band = mstat.band;
1083 status->signal = mstat.signal;
1084 status->chains = mstat.chains;
1085 status->ampdu_reference = mstat.ampdu_ref;
1086 status->device_timestamp = mstat.timestamp;
1087 status->mactime = mstat.timestamp;
1088 status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1089 if (status->signal <= -128)
1090 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1091
1092 if (ieee80211_is_beacon(hdr->frame_control) ||
1093 ieee80211_is_probe_resp(hdr->frame_control))
1094 status->boottime_ns = ktime_get_boottime_ns();
1095
1096 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1097 BUILD_BUG_ON(sizeof(status->chain_signal) !=
1098 sizeof(mstat.chain_signal));
1099 memcpy(status->chain_signal, mstat.chain_signal,
1100 sizeof(mstat.chain_signal));
1101
1102 *sta = wcid_to_sta(mstat.wcid);
1103 *hw = mt76_phy_hw(dev, mstat.phy_idx);
1104 }
1105
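/*
 * Software CCMP replay detection: compare the received PN against the
 * last PN stored per TID (or the management frame counter) and demote
 * replayed frames to monitor-only delivery.
 */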
1106 static void
1107 mt76_check_ccmp_pn(struct sk_buff *skb)
1108 {
1109 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1110 struct mt76_wcid *wcid = status->wcid;
1111 struct ieee80211_hdr *hdr;
1112 int security_idx;
1113 int ret;
1114
1115 if (!(status->flag & RX_FLAG_DECRYPTED))
1116 return;
1117
1118 if (status->flag & RX_FLAG_ONLY_MONITOR)
1119 return;
1120
1121 if (!wcid || !wcid->rx_check_pn)
1122 return;
1123
1124 security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1125 if (status->flag & RX_FLAG_8023)
1126 goto skip_hdr_check;
1127
1128 hdr = mt76_skb_get_hdr(skb);
1129 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1130 /*
1131 * Validate the first fragment both here and in mac80211;
1132 * all further fragments will be validated by mac80211 only.
1133 */
1134 if (ieee80211_is_frag(hdr) &&
1135 !ieee80211_is_first_frag(hdr->frame_control))
1136 return;
1137 }
1138
1139 /* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1140 *
1141 * the recipient shall maintain a single replay counter for received
1142 * individually addressed robust Management frames that are received
1143 * with the To DS subfield equal to 0, [...]
1144 */
1145 if (ieee80211_is_mgmt(hdr->frame_control) &&
1146 !ieee80211_has_tods(hdr->frame_control))
1147 security_idx = IEEE80211_NUM_TIDS;
1148
1149 skip_hdr_check:
1150 BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1151 ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1152 sizeof(status->iv));
1153 if (ret <= 0) {
1154 status->flag |= RX_FLAG_ONLY_MONITOR;
1155 return;
1156 }
1157
1158 memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1159
1160 if (status->flag & RX_FLAG_IV_STRIPPED)
1161 status->flag |= RX_FLAG_PN_VALIDATED;
1162 }
1163
1164 static void
1165 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1166 int len)
1167 {
1168 struct mt76_wcid *wcid = status->wcid;
1169 struct ieee80211_rx_status info = {
1170 .enc_flags = status->enc_flags,
1171 .rate_idx = status->rate_idx,
1172 .encoding = status->encoding,
1173 .band = status->band,
1174 .nss = status->nss,
1175 .bw = status->bw,
1176 };
1177 struct ieee80211_sta *sta;
1178 u32 airtime;
1179 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1180
1181 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1182 spin_lock(&dev->cc_lock);
1183 dev->cur_cc_bss_rx += airtime;
1184 spin_unlock(&dev->cc_lock);
1185
1186 if (!wcid || !wcid->sta)
1187 return;
1188
1189 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1190 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1191 }
1192
1193 static void
1194 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1195 {
1196 struct mt76_wcid *wcid;
1197 int wcid_idx;
1198
1199 if (!dev->rx_ampdu_len)
1200 return;
1201
1202 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1203 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1204 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1205 else
1206 wcid = NULL;
1207 dev->rx_ampdu_status.wcid = wcid;
1208
1209 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1210
1211 dev->rx_ampdu_len = 0;
1212 dev->rx_ampdu_ref = 0;
1213 }
1214
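/*
 * Software RX airtime accounting: frames that are part of an A-MPDU
 * are accumulated and reported once per aggregate; everything else is
 * reported immediately.
 */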
1215 static void
1216 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1217 {
1218 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1219 struct mt76_wcid *wcid = status->wcid;
1220
1221 if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1222 return;
1223
1224 if (!wcid || !wcid->sta) {
1225 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1226
1227 if (status->flag & RX_FLAG_8023)
1228 return;
1229
1230 if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1231 return;
1232
1233 wcid = NULL;
1234 }
1235
1236 if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1237 status->ampdu_ref != dev->rx_ampdu_ref)
1238 mt76_airtime_flush_ampdu(dev);
1239
1240 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1241 if (!dev->rx_ampdu_len ||
1242 status->ampdu_ref != dev->rx_ampdu_ref) {
1243 dev->rx_ampdu_status = *status;
1244 dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1245 dev->rx_ampdu_ref = status->ampdu_ref;
1246 }
1247
1248 dev->rx_ampdu_len += skb->len;
1249 return;
1250 }
1251
1252 mt76_airtime_report(dev, status, skb->len);
1253 }
1254
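/*
 * Per-frame station bookkeeping on RX: update the RSSI average, track
 * powersave transitions and trigger U-APSD where needed.
 */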
1255 static void
1256 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1257 {
1258 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1259 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1260 struct ieee80211_sta *sta;
1261 struct ieee80211_hw *hw;
1262 struct mt76_wcid *wcid = status->wcid;
1263 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1264 bool ps;
1265
1266 hw = mt76_phy_hw(dev, status->phy_idx);
1267 if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1268 !(status->flag & RX_FLAG_8023)) {
1269 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1270 if (sta)
1271 wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1272 }
1273
1274 mt76_airtime_check(dev, skb);
1275
1276 if (!wcid || !wcid->sta)
1277 return;
1278
1279 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1280
1281 if (status->signal <= 0)
1282 ewma_signal_add(&wcid->rssi, -status->signal);
1283
1284 wcid->inactive_count = 0;
1285
1286 if (status->flag & RX_FLAG_8023)
1287 return;
1288
1289 if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1290 return;
1291
1292 if (ieee80211_is_pspoll(hdr->frame_control)) {
1293 ieee80211_sta_pspoll(sta);
1294 return;
1295 }
1296
1297 if (ieee80211_has_morefrags(hdr->frame_control) ||
1298 !(ieee80211_is_mgmt(hdr->frame_control) ||
1299 ieee80211_is_data(hdr->frame_control)))
1300 return;
1301
1302 ps = ieee80211_has_pm(hdr->frame_control);
1303
1304 if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1305 ieee80211_is_qos_nullfunc(hdr->frame_control)))
1306 ieee80211_sta_uapsd_trigger(sta, tidno);
1307
1308 if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1309 return;
1310
1311 if (ps)
1312 set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1313
1314 if (dev->drv->sta_ps)
1315 dev->drv->sta_ps(dev, sta, ps);
1316
1317 if (!ps)
1318 clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1319
1320 ieee80211_sta_ps_transition(sta, ps);
1321 }
1322
1323 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1324 struct napi_struct *napi)
1325 {
1326 struct ieee80211_sta *sta;
1327 struct ieee80211_hw *hw;
1328 struct sk_buff *skb, *tmp;
1329 LIST_HEAD(list);
1330
1331 spin_lock(&dev->rx_lock);
1332 while ((skb = __skb_dequeue(frames)) != NULL) {
1333 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1334
1335 mt76_check_ccmp_pn(skb);
1336 skb_shinfo(skb)->frag_list = NULL;
1337 mt76_rx_convert(dev, skb, &hw, &sta);
1338 ieee80211_rx_list(hw, sta, skb, &list);
1339
1340 /* subsequent amsdu frames */
1341 while (nskb) {
1342 skb = nskb;
1343 nskb = nskb->next;
1344 skb->next = NULL;
1345
1346 mt76_rx_convert(dev, skb, &hw, &sta);
1347 ieee80211_rx_list(hw, sta, skb, &list);
1348 }
1349 }
1350 spin_unlock(&dev->rx_lock);
1351
1352 if (!napi) {
1353 netif_receive_skb_list(&list);
1354 return;
1355 }
1356
1357 list_for_each_entry_safe(skb, tmp, &list, list) {
1358 skb_list_del_init(skb);
1359 napi_gro_receive(napi, skb);
1360 }
1361 }
1362
1363 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1364 struct napi_struct *napi)
1365 {
1366 struct sk_buff_head frames;
1367 struct sk_buff *skb;
1368
1369 __skb_queue_head_init(&frames);
1370
1371 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1372 mt76_check_sta(dev, skb);
1373 if (mtk_wed_device_active(&dev->mmio.wed))
1374 __skb_queue_tail(&frames, skb);
1375 else
1376 mt76_rx_aggr_reorder(skb, &frames);
1377 }
1378
1379 mt76_rx_complete(dev, &frames, napi);
1380 }
1381 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1382
1383 static int
1384 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1385 struct ieee80211_sta *sta)
1386 {
1387 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1388 struct mt76_dev *dev = phy->dev;
1389 int ret;
1390 int i;
1391
1392 mutex_lock(&dev->mutex);
1393
1394 ret = dev->drv->sta_add(dev, vif, sta);
1395 if (ret)
1396 goto out;
1397
1398 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1399 struct mt76_txq *mtxq;
1400
1401 if (!sta->txq[i])
1402 continue;
1403
1404 mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1405 mtxq->wcid = wcid->idx;
1406 }
1407
1408 ewma_signal_init(&wcid->rssi);
1409 if (phy->band_idx == MT_BAND1)
1410 mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1411 wcid->phy_idx = phy->band_idx;
1412 rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1413
1414 mt76_packet_id_init(wcid);
1415 out:
1416 mutex_unlock(&dev->mutex);
1417
1418 return ret;
1419 }
1420
1421 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1422 struct ieee80211_sta *sta)
1423 {
1424 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1425 int i, idx = wcid->idx;
1426
1427 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1428 mt76_rx_aggr_stop(dev, wcid, i);
1429
1430 if (dev->drv->sta_remove)
1431 dev->drv->sta_remove(dev, vif, sta);
1432
1433 mt76_packet_id_flush(dev, wcid);
1434
1435 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1436 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1437 }
1438 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1439
1440 static void
1441 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1442 struct ieee80211_sta *sta)
1443 {
1444 mutex_lock(&dev->mutex);
1445 __mt76_sta_remove(dev, vif, sta);
1446 mutex_unlock(&dev->mutex);
1447 }
1448
1449 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1450 struct ieee80211_sta *sta,
1451 enum ieee80211_sta_state old_state,
1452 enum ieee80211_sta_state new_state)
1453 {
1454 struct mt76_phy *phy = hw->priv;
1455 struct mt76_dev *dev = phy->dev;
1456
1457 if (old_state == IEEE80211_STA_NOTEXIST &&
1458 new_state == IEEE80211_STA_NONE)
1459 return mt76_sta_add(phy, vif, sta);
1460
1461 if (old_state == IEEE80211_STA_AUTH &&
1462 new_state == IEEE80211_STA_ASSOC &&
1463 dev->drv->sta_assoc)
1464 dev->drv->sta_assoc(dev, vif, sta);
1465
1466 if (old_state == IEEE80211_STA_NONE &&
1467 new_state == IEEE80211_STA_NOTEXIST)
1468 mt76_sta_remove(dev, vif, sta);
1469
1470 return 0;
1471 }
1472 EXPORT_SYMBOL_GPL(mt76_sta_state);
1473
1474 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1475 struct ieee80211_sta *sta)
1476 {
1477 struct mt76_phy *phy = hw->priv;
1478 struct mt76_dev *dev = phy->dev;
1479 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1480
1481 mutex_lock(&dev->mutex);
1482 spin_lock_bh(&dev->status_lock);
1483 rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1484 spin_unlock_bh(&dev->status_lock);
1485 mutex_unlock(&dev->mutex);
1486 }
1487 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1488
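/*
 * phy->txpower_cur and the per-stream delta are kept in 0.5 dBm units;
 * the sum is rounded up to whole dBm for mac80211.
 */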
1489 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1490 int *dbm)
1491 {
1492 struct mt76_phy *phy = hw->priv;
1493 int n_chains = hweight8(phy->antenna_mask);
1494 int delta = mt76_tx_power_nss_delta(n_chains);
1495
1496 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1497
1498 return 0;
1499 }
1500 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1501
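/*
 * Import SAR limits from cfg80211. Sub-spec power arrives in 0.25 dBm
 * units; the shift stores it in 0.5 dBm units (e.g. a 20 dBm limit,
 * value 80, is stored as 40).
 */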
1502 int mt76_init_sar_power(struct ieee80211_hw *hw,
1503 const struct cfg80211_sar_specs *sar)
1504 {
1505 struct mt76_phy *phy = hw->priv;
1506 const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1507 int i;
1508
1509 if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1510 return -EINVAL;
1511
1512 for (i = 0; i < sar->num_sub_specs; i++) {
1513 u32 index = sar->sub_specs[i].freq_range_index;
1514 /* SAR specifies the power limitation in 0.25 dBm steps */
1515 s32 power = sar->sub_specs[i].power >> 1;
1516
1517 if (power > 127 || power < -127)
1518 power = 127;
1519
1520 phy->frp[index].range = &capa->freq_ranges[index];
1521 phy->frp[index].power = power;
1522 }
1523
1524 return 0;
1525 }
1526 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1527
1528 int mt76_get_sar_power(struct mt76_phy *phy,
1529 struct ieee80211_channel *chan,
1530 int power)
1531 {
1532 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1533 int freq, i;
1534
1535 if (!capa || !phy->frp)
1536 return power;
1537
1538 if (power > 127 || power < -127)
1539 power = 127;
1540
1541 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1542 for (i = 0; i < capa->num_freq_ranges; i++) {
1543 if (phy->frp[i].range &&
1544 freq >= phy->frp[i].range->start_freq &&
1545 freq < phy->frp[i].range->end_freq) {
1546 power = min_t(int, phy->frp[i].power, power);
1547 break;
1548 }
1549 }
1550
1551 return power;
1552 }
1553 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1554
1555 static void
1556 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1557 {
1558 if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1559 ieee80211_csa_finish(vif);
1560 }
1561
1562 void mt76_csa_finish(struct mt76_dev *dev)
1563 {
1564 if (!dev->csa_complete)
1565 return;
1566
1567 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1568 IEEE80211_IFACE_ITER_RESUME_ALL,
1569 __mt76_csa_finish, dev);
1570
1571 dev->csa_complete = 0;
1572 }
1573 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1574
1575 static void
1576 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1577 {
1578 struct mt76_dev *dev = priv;
1579
1580 if (!vif->bss_conf.csa_active)
1581 return;
1582
1583 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1584 }
1585
1586 void mt76_csa_check(struct mt76_dev *dev)
1587 {
1588 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1589 IEEE80211_IFACE_ITER_RESUME_ALL,
1590 __mt76_csa_check, dev);
1591 }
1592 EXPORT_SYMBOL_GPL(mt76_csa_check);
1593
1594 int
1595 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1596 {
1597 return 0;
1598 }
1599 EXPORT_SYMBOL_GPL(mt76_set_tim);
1600
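/*
 * Re-insert the 8-byte CCMP header (PN plus ExtIV/key id byte) that the
 * hardware stripped, using the PN saved in the RX status, and clear
 * RX_FLAG_IV_STRIPPED so mac80211 sees a complete frame.
 */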
1601 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1602 {
1603 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1604 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1605 u8 *hdr, *pn = status->iv;
1606
1607 __skb_push(skb, 8);
1608 memmove(skb->data, skb->data + 8, hdr_len);
1609 hdr = skb->data + hdr_len;
1610
1611 hdr[0] = pn[5];
1612 hdr[1] = pn[4];
1613 hdr[2] = 0;
1614 hdr[3] = 0x20 | (key_id << 6);
1615 hdr[4] = pn[3];
1616 hdr[5] = pn[2];
1617 hdr[6] = pn[1];
1618 hdr[7] = pn[0];
1619
1620 status->flag &= ~RX_FLAG_IV_STRIPPED;
1621 }
1622 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1623
1624 int mt76_get_rate(struct mt76_dev *dev,
1625 struct ieee80211_supported_band *sband,
1626 int idx, bool cck)
1627 {
1628 int i, offset = 0, len = sband->n_bitrates;
1629
1630 if (cck) {
1631 if (sband != &dev->phy.sband_2g.sband)
1632 return 0;
1633
1634 idx &= ~BIT(2); /* short preamble */
1635 } else if (sband == &dev->phy.sband_2g.sband) {
1636 offset = 4;
1637 }
1638
1639 for (i = offset; i < len; i++) {
1640 if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1641 return i;
1642 }
1643
1644 return 0;
1645 }
1646 EXPORT_SYMBOL_GPL(mt76_get_rate);
1647
1648 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1649 const u8 *mac)
1650 {
1651 struct mt76_phy *phy = hw->priv;
1652
1653 set_bit(MT76_SCANNING, &phy->state);
1654 }
1655 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1656
1657 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1658 {
1659 struct mt76_phy *phy = hw->priv;
1660
1661 clear_bit(MT76_SCANNING, &phy->state);
1662 }
1663 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1664
1665 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1666 {
1667 struct mt76_phy *phy = hw->priv;
1668 struct mt76_dev *dev = phy->dev;
1669
1670 mutex_lock(&dev->mutex);
1671 *tx_ant = phy->antenna_mask;
1672 *rx_ant = phy->antenna_mask;
1673 mutex_unlock(&dev->mutex);
1674
1675 return 0;
1676 }
1677 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1678
1679 struct mt76_queue *
1680 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1681 int ring_base, u32 flags)
1682 {
1683 struct mt76_queue *hwq;
1684 int err;
1685
1686 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1687 if (!hwq)
1688 return ERR_PTR(-ENOMEM);
1689
1690 hwq->flags = flags;
1691
1692 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1693 if (err < 0)
1694 return ERR_PTR(err);
1695
1696 return hwq;
1697 }
1698 EXPORT_SYMBOL_GPL(mt76_init_queue);
1699
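/*
 * Pick a legacy default rate: an offset of 4 skips the CCK entries in
 * mt76_rates for bands other than 2 GHz, and negative indices fall
 * back to the lowest rate.
 */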
1700 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1701 struct ieee80211_vif *vif, int rateidx)
1702 {
1703 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1704 struct cfg80211_chan_def *chandef = mvif->ctx ?
1705 &mvif->ctx->def :
1706 &phy->chandef;
1707 int offset = 0;
1708
1709 if (chandef->chan->band != NL80211_BAND_2GHZ)
1710 offset = 4;
1711
1712 /* pick the lowest rate for hidden nodes */
1713 if (rateidx < 0)
1714 rateidx = 0;
1715
1716 rateidx += offset;
1717 if (rateidx >= ARRAY_SIZE(mt76_rates))
1718 rateidx = offset;
1719
1720 return mt76_rates[rateidx].hw_value;
1721 }
1722 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1723
1724 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1725 struct mt76_sta_stats *stats, bool eht)
1726 {
1727 int i, ei = wi->initial_stat_idx;
1728 u64 *data = wi->data;
1729
1730 wi->sta_count++;
1731
1732 data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1733 data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1734 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1735 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1736 data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1737 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1738 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1739 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1740 data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1741 if (eht) {
1742 data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1743 data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1744 data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1745 }
1746
1747 for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1748 data[ei++] += stats->tx_bw[i];
1749
1750 for (i = 0; i < (eht ? 14 : 12); i++)
1751 data[ei++] += stats->tx_mcs[i];
1752
1753 for (i = 0; i < 4; i++)
1754 data[ei++] += stats->tx_nss[i];
1755
1756 wi->worker_stat_count = ei - wi->initial_stat_idx;
1757 }
1758 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1759
1760 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1761 {
1762 #ifdef CONFIG_PAGE_POOL_STATS
1763 struct page_pool_stats stats = {};
1764 int i;
1765
1766 mt76_for_each_q_rx(dev, i)
1767 page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1768
1769 page_pool_ethtool_stats_get(data, &stats);
1770 *index += page_pool_ethtool_stats_get_count();
1771 #endif
1772 }
1773 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1774
1775 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1776 {
1777 struct ieee80211_hw *hw = phy->hw;
1778 struct mt76_dev *dev = phy->dev;
1779
1780 if (dev->region == NL80211_DFS_UNSET ||
1781 test_bit(MT76_SCANNING, &phy->state))
1782 return MT_DFS_STATE_DISABLED;
1783
1784 if (!hw->conf.radar_enabled) {
1785 if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1786 (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1787 return MT_DFS_STATE_ACTIVE;
1788
1789 return MT_DFS_STATE_DISABLED;
1790 }
1791
1792 if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1793 return MT_DFS_STATE_CAC;
1794
1795 return MT_DFS_STATE_ACTIVE;
1796 }
1797 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1798