// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7921.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

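/* Look up the mt76_wcid an RX frame belongs to. Unicast frames map
 * directly onto the WCID table entry; multicast/broadcast frames are
 * redirected to the per-vif broadcast wcid of the owning interface.
 */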
static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7921_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7921_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
EXPORT_SYMBOL_GPL(mt7921_sta_ps);

bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

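/* Walk the station poll list and read the per-AC TX/RX airtime
 * counters from the WTBL, reporting the deltas to mac80211 for
 * airtime fairness accounting. The guard interval flags of the last
 * rate are also refreshed from the per-sta counters here, since TXS
 * packets do not carry GI information.
 */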
void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct rate_info *rate;
	int i;

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		u8 bw;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/* Reading the GI info from txs packets is not supported.
		 * To keep tx status reporting and AQL accurate, make sure
		 * the GI flags match by polling the GI from the per-sta
		 * counters directly.
		 */
		rate = &msta->wcid.rate;
		addr = mt7921_mac_wtbl_lmac_addr(idx,
						 MT_WTBL_TXRX_CAP_RATE_OFFSET);
		val = mt76_rr(dev, addr);

		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}
	}
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);

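/* Fill in band and frequency for the RX status. Outside of scan and
 * remain-on-channel the current operating channel is used; otherwise
 * the channel number from the RXD is decoded (values above 180 encode
 * 6 GHz channels).
 */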
static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	if (chfreq > 180) {
		status->band = NL80211_BAND_6GHZ;
		chfreq = (chfreq - 181) * 4 + 1;
	} else if (chfreq > 14) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		status->band = NL80211_BAND_2GHZ;
	}
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

static void
mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct sk_buff *skb = priv;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

	if (status->signal > 0)
		return;

	if (!ether_addr_equal(vif->addr, hdr->addr1))
		return;

	ewma_rssi_add(&mvif->rssi, -status->signal);
}

static void
mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control))
		return;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt7921_mac_rssi_iter, skb);
}

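/* Parse the RX descriptor (RXD) and its optional groups, filling the
 * mt76_rx_status embedded in skb->cb: cipher/decryption state, band
 * and frequency, rate information from the P-RXV, per-chain signal
 * strength, A-MSDU state and, for hw-decapped frames, the 802.3 flag.
 * Returns 0 on success or a negative error on malformed descriptors.
 */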
static int
mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	u16 hdr_gap;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	u32 csum_status = *(u32 *)skb->cb;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	struct mt7921_sta *msta = NULL;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	u8 mode = 0;
	int i, idx;

	memset(status, 0, sizeof(*status));

	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
		return -EINVAL;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7921_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	mt7921_get_status_freq_info(dev, mphy, status, chfreq);

	switch (status->band) {
	case NL80211_BAND_5GHZ:
		sband = &mphy->sband_5g.sband;
		break;
	case NL80211_BAND_6GHZ:
		sband = &mphy->sband_6g.sband;
		break;
	default:
		sband = &mphy->sband_2g.sband;
		break;
	}

	if (!sband->channels)
		return -EINVAL;

	if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
						    rxv, &mode);
		if (ret < 0)
			return ret;

		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 6;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;

			rxv = rxd;
			/* Monitor mode would use RCPI described in GROUP 5
			 * instead.
			 */
			v1 = le32_to_cpu(rxv[0]);

			rxd += 12;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
		status->signal = -128;
		for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)) ||
			    status->chain_signal[i] >= 0)
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			memmove(skb->data + 2, skb->data,
				ieee80211_get_hdrlen_from_skb(skb));
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	mt7921_mac_assoc_rssi(dev, skb);

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}

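/* Start a BA session for QoS data sent to an HT/HE peer if none has
 * been established on this TID yet; VO TIDs are skipped.
 */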
static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7921_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7921_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

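/* Handle a TX status (TXS) event: complete the matching TX status skb
 * and queue the station for airtime/rate polling.
 */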
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
	struct mt7921_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= MT7921_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7921_sta, wcid);

	mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

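/* Unmap and complete a transmitted frame, optionally checking whether
 * a BA session should be started for its TID, then return the txwi
 * descriptor to the free pool.
 */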
void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
		      struct ieee80211_sta *sta, bool clear_status,
		      struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7921_tx_check_aggr(sta, txwi);

		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
EXPORT_SYMBOL_GPL(mt7921_txwi_free);

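/* Handle a TXRX_NOTIFY (tx-free) event: release the tokens and
 * complete the skbs of all MSDUs reported by the firmware, queue the
 * affected stations for polling and kick the TX worker.
 */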
static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	if (WARN_ON_ONCE((void *)&tx_info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(tx_info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7921_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
	}

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	rcu_read_lock();
	mt7921_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

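/* Early-dispatch hook for RX buffers: consume tx-free and TXS events
 * without building an skb. Returns true if the buffer still needs to
 * be processed as a regular RX frame.
 */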
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7921_mac_tx_free(dev, data, len); /* mmio */
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7921_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mt7921_rx_check);

void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);

	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7921_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7921_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7921_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7921_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7921_queue_rx_skb);

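/* Reset survey and airtime accounting: dummy-read the (clear-on-read)
 * aggregation and airtime MIB registers and clear the RX airtime
 * counters via MT_WF_RMAC_MIB_RXTIME_CLR.
 */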
void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}

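/* Program CCK/OFDM slot, SIFS, EIFS and timeout values for the current
 * channel, extending the PLCP/CCA timeouts by the configured coverage
 * class. TX/RX arbitration is briefly disabled while the timings are
 * updated.
 */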
void mt7921_mac_set_timing(struct mt7921_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7921_dev *dev = phy->dev;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
	int sifs = is_2ghz ? 10 : 16, offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || !is_2ghz)
		val = MT7921_CFEND_RATE_DEFAULT;
	else
		val = MT7921_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static u8
mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
{
	return 0;
}

static void
mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	nf = mt7921_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

void mt7921_update_channel(struct mt76_phy *mphy)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);

	if (mt76_connac_pm_wake(mphy, &dev->pm))
		return;

	mt7921_phy_update_channel(mphy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);

	mt76_connac_power_save_sched(mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7921_update_channel);

static void
mt7921_vif_connect_iter(void *priv, u8 *mac,
			struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct mt7921_dev *dev = mvif->phy->dev;
	struct ieee80211_hw *hw = mt76_hw(dev);

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_disconnect(vif, true);

	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
	mt7921_mcu_set_tx(dev, vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
					    true);
		mt7921_mcu_sta_update(dev, NULL, vif, true,
				      MT76_STA_INFO_STATE_NONE);
		mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
	}
}

/* system error recovery */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, ret;

	dev_dbg(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	for (i = 0; i < 10; i++) {
		mutex_lock(&dev->mt76.mutex);
		ret = mt7921_dev_reset(dev);
		mutex_unlock(&dev->mt76.mutex);

		if (!ret)
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	pm->suspended = false;
	ieee80211_wake_queues(hw);
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7921_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}

void mt7921_reset(struct mt76_dev *mdev)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct mt76_connac_pm *pm = &dev->pm;

	if (!dev->hw_init_done)
		return;

	if (dev->hw_full_reset)
		return;

	if (pm->suspended)
		return;

	queue_work(dev->mt76.wq, &dev->reset_work);
}
EXPORT_SYMBOL_GPL(mt7921_reset);

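/* Accumulate the periodic MIB counters (FCS/ACK/BA failures, RTS,
 * (A-)MPDU, A-MSDU and beamforming statistics) into the software mib
 * state and fold the TX aggregation histograms into aggr_stats.
 */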
void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1;
	u32 val;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0));
	mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0));
	mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0));

	val = mt76_rr(dev, MT_MIB_SDR32(0));
	mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val);
	mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val);

	val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0));
	mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val);
	mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val);

	val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0));
	mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val);
	mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val);
	mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val);
	mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val);

	mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0));
	mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0));
	mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0));
	mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0));

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += val;
		mib->tx_amsdu_cnt += val;
	}

	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
		u32 val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}

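/* Periodic per-phy housekeeping: update survey data and, on every
 * second run, the MIB statistics, then check for stale TX status
 * entries and re-arm the watchdog.
 */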
void mt7921_mac_work(struct work_struct *work)
{
	struct mt7921_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7921_mutex_acquire(phy->dev);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 2) {
		mphy->mac_work_count = 0;

		mt7921_mac_update_mib_stats(phy);
	}

	mt7921_mutex_release(phy->dev);

	mt76_tx_status_check(mphy->dev, false);
	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
				     MT7921_WATCHDOG_TIME);
}

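/* Runtime-PM wake path: take driver ownership of the device, flush
 * the queues deferred while asleep, restart NAPI (or the sdio txrx
 * worker) and unblock mac80211's TX queues.
 */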
void mt7921_pm_wake_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7921_mcu_drv_pmctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_connac_tx_cleanup(mdev);
		}
		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     MT7921_WATCHDOG_TIME);
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

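/* Runtime-PM idle path: hand device ownership back to the firmware
 * once the idle timeout has expired, unless a scan is in progress or
 * the mt76 mutex is held (i.e. registers are being accessed).
 */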
void mt7921_pm_power_save_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	unsigned long delta;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.ps_work.work);
	mphy = dev->phy.mt76;

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
	    dev->fw_assert)
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if the mt76 mutex is held, we should not put the device
		 * to sleep since we are currently accessing the device
		 * register map. Wait for the next power_save trigger
		 * instead.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7921_mcu_fw_pmctrl(dev)) {
		cancel_delayed_work_sync(&mphy->mac_work);
		return;
	}
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

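/* Collect the coredump fragments received from the firmware into a
 * single buffer, submit it through the devcoredump interface and
 * trigger a chip reset.
 */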
void mt7921_coredump_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	char *dump, *data;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt7921_reset(&dev->mt76);
}

/* usb_sdio */
static void
mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
			   enum mt76_txq_id qid, struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key, int pid,
			   struct sk_buff *skb)
{
	__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);

	memset(txwi, 0, MT_SDIO_TXD_SIZE);
	mt76_connac2_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
	skb_push(skb, MT_SDIO_TXD_SIZE);
}

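/* Prepare a frame for transmission over usb/sdio buses: allocate a
 * packet id for TX status reporting, build the txwi in the skb
 * headroom, prepend the bus-specific header and pad the frame to the
 * required alignment.
 */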
int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
				   struct ieee80211_sta *sta,
				   struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct sk_buff *skb = tx_info->skb;
	int err, pad, pktid, type;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->last_txs = jiffies;
		}
	}

	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
	mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);

	type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
	mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
	pad = round_up(skb->len, 4) - skb->len;
	if (mt76_is_usb(mdev))
		pad += 4;

	err = mt76_skb_adjust_pad(skb, pad);
	if (err)
		/* Release pktid in case of error. */
		idr_remove(&wcid->pktid, pktid);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);

void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
				     struct mt76_queue_entry *e)
{
	__le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
	unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid;
	u16 idx;

	idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	wcid = rcu_dereference(mdev->wcid[idx]);
	sta = wcid_to_sta(wcid);

	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7921_tx_check_aggr(sta, txwi);

	skb_pull(e->skb, headroom);
	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);

bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);

	mt7921_mutex_acquire(dev);
	mt7921_mac_sta_poll(dev);
	mt7921_mutex_release(dev);

	return false;
}
EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);

#if IS_ENABLED(CONFIG_IPV6)
void mt7921_set_ipv6_ns_work(struct work_struct *work)
{
	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
						ipv6_ns_work);
	struct sk_buff *skb;
	int ret = 0;

	do {
		skb = skb_dequeue(&dev->ipv6_ns_list);

		if (!skb)
			break;

		mt7921_mutex_acquire(dev);
		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_UNI_CMD(OFFLOAD), true);
		mt7921_mutex_release(dev);

	} while (!ret);

	if (ret)
		skb_queue_purge(&dev->ipv6_ns_list);
}
#endif