1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
4  * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
5  */
6 
7 #ifndef MT7601U_H
8 #define MT7601U_H
9 
10 #include <linux/bitfield.h>
11 #include <linux/kernel.h>
12 #include <linux/device.h>
13 #include <linux/mutex.h>
14 #include <linux/usb.h>
15 #include <linux/completion.h>
16 #include <net/mac80211.h>
17 #include <linux/debugfs.h>
18 #include <linux/average.h>
19 
20 #include "regs.h"
21 
22 #define MT_CALIBRATE_INTERVAL		(4 * HZ)
23 
24 #define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
25 #define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
26 #define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)
27 
28 #define MT_BBP_REG_VERSION		0x00
29 
30 #define MT_USB_AGGR_SIZE_LIMIT		28 /* * 1024B */
31 #define MT_USB_AGGR_TIMEOUT		0x80 /* * 33ns */
32 #define MT_RX_ORDER			3
33 #define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
34 
35 struct mt7601u_dma_buf {
36 	struct urb *urb;
37 	void *buf;
38 	dma_addr_t dma;
39 	size_t len;
40 };
41 
42 struct mt7601u_mcu {
43 	struct mutex mutex;
44 
45 	u8 msg_seq;
46 
47 	struct mt7601u_dma_buf resp;
48 	struct completion resp_cmpl;
49 };
50 
51 struct mt7601u_freq_cal {
52 	struct delayed_work work;
53 	u8 freq;
54 	bool enabled;
55 	bool adjusting;
56 };
57 
58 struct mac_stats {
59 	u64 rx_stat[6];
60 	u64 tx_stat[6];
61 	u64 aggr_stat[2];
62 	u64 aggr_n[32];
63 	u64 zero_len_del[2];
64 };
65 
#define N_RX_ENTRIES	16
/**
 * struct mt7601u_rx_queue - ring of RX URBs
 * @dev:	owning adapter
 * @e:		ring entries; each pairs an URB with the page it fills
 * @start:	index of the oldest completed entry (consumer side)
 * @end:	index at which the next URB is submitted (producer side)
 * @entries:	ring capacity
 * @pending:	number of completed URBs awaiting processing
 */
struct mt7601u_rx_queue {
	struct mt7601u_dev *dev;

	struct mt7601u_dma_buf_rx {
		struct urb *urb;
		struct page *p;
	} e[N_RX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int pending;
};
80 
#define N_TX_ENTRIES	64

/**
 * struct mt7601u_tx_queue - ring of in-flight TX URBs
 * @dev:	owning adapter
 * @e:		ring entries; each pairs an URB with the skb it carries
 * @start:	index of the oldest in-flight entry (completion side)
 * @end:	index at which the next skb is enqueued (submission side)
 * @entries:	ring capacity
 * @used:	number of entries currently occupied
 * @fifo_seq:	sequence counter for the HW TX status FIFO
 */
struct mt7601u_tx_queue {
	struct mt7601u_dev *dev;

	struct mt7601u_dma_buf_tx {
		struct urb *urb;
		struct sk_buff *skb;
	} e[N_TX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int used;
	unsigned int fifo_seq;
};
97 
/* WCID allocation:
 *     0: mcast wcid
 *     1: bssid wcid
 *  1...: STAs
 * ...7e: group wcids
 *    7f: reserved
 */
#define N_WCIDS		128
/* Group wcids grow downward from the top of the table (0x7e, 0x7d, ...). */
#define GROUP_WCID(idx)	(N_WCIDS - 2 - (idx))

struct mt7601u_eeprom_params;

#define MT_EE_TEMPERATURE_SLOPE		39
#define MT_FREQ_OFFSET_INVALID		(-128)
112 
113 enum mt_temp_mode {
114 	MT_TEMP_MODE_NORMAL,
115 	MT_TEMP_MODE_HIGH,
116 	MT_TEMP_MODE_LOW,
117 };
118 
119 enum mt_bw {
120 	MT_BW_20,
121 	MT_BW_40,
122 };
123 
124 enum {
125 	MT7601U_STATE_INITIALIZED,
126 	MT7601U_STATE_REMOVED,
127 	MT7601U_STATE_WLAN_RUNNING,
128 	MT7601U_STATE_MCU_RUNNING,
129 	MT7601U_STATE_SCANNING,
130 	MT7601U_STATE_READING_STATS,
131 	MT7601U_STATE_MORE_STATS,
132 };
133 
134 DECLARE_EWMA(rssi, 10, 4);
135 
136 /**
137  * struct mt7601u_dev - adapter structure
138  * @lock:		protects @wcid->tx_rate.
139  * @mac_lock:		locks out mac80211's tx status and rx paths.
140  * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
141  *			flags in @state.
142  * @rx_lock:		protects @rx_q.
143  * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
144  * @mutex:		ensures exclusive access from mac80211 callbacks.
145  * @vendor_req_mutex:	protects @vend_buf, ensures atomicity of read/write
146  *			accesses
147  * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
148  *			(accesses to RF and BBP).
149  * @hw_atomic_mutex:	ensures exclusive access to HW during critical
150  *			operations (power management, channel switch).
151  */
152 struct mt7601u_dev {
153 	struct ieee80211_hw *hw;
154 	struct device *dev;
155 
156 	unsigned long state;
157 
158 	struct mutex mutex;
159 
160 	unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
161 
162 	struct cfg80211_chan_def chandef;
163 	struct ieee80211_supported_band *sband_2g;
164 
165 	struct mt7601u_mcu mcu;
166 
167 	struct delayed_work cal_work;
168 	struct delayed_work mac_work;
169 
170 	struct workqueue_struct *stat_wq;
171 	struct delayed_work stat_work;
172 
173 	struct mt76_wcid *mon_wcid;
174 	struct mt76_wcid __rcu *wcid[N_WCIDS];
175 
176 	spinlock_t lock;
177 	spinlock_t mac_lock;
178 
179 	const u16 *beacon_offsets;
180 
181 	u8 macaddr[ETH_ALEN];
182 	struct mt7601u_eeprom_params *ee;
183 
184 	struct mutex vendor_req_mutex;
185 	void *vend_buf;
186 
187 	struct mutex reg_atomic_mutex;
188 	struct mutex hw_atomic_mutex;
189 
190 	u32 rxfilter;
191 	u32 debugfs_reg;
192 
193 	u8 out_eps[8];
194 	u8 in_eps[8];
195 	u16 out_max_packet;
196 	u16 in_max_packet;
197 
198 	/* TX */
199 	spinlock_t tx_lock;
200 	struct tasklet_struct tx_tasklet;
201 	struct mt7601u_tx_queue *tx_q;
202 	struct sk_buff_head tx_skb_done;
203 
204 	atomic_t avg_ampdu_len;
205 
206 	/* RX */
207 	spinlock_t rx_lock;
208 	struct tasklet_struct rx_tasklet;
209 	struct mt7601u_rx_queue rx_q;
210 
211 	/* Connection monitoring things */
212 	spinlock_t con_mon_lock;
213 	u8 ap_bssid[ETH_ALEN];
214 
215 	s8 bcn_freq_off;
216 	u8 bcn_phy_mode;
217 
218 	struct ewma_rssi avg_rssi;
219 
220 	u8 agc_save;
221 
222 	struct mt7601u_freq_cal freq_cal;
223 
224 	bool tssi_read_trig;
225 
226 	s8 tssi_init;
227 	s8 tssi_init_hvga;
228 	s16 tssi_init_hvga_offset_db;
229 
230 	int prev_pwr_diff;
231 
232 	enum mt_temp_mode temp_mode;
233 	int curr_temp;
234 	int dpd_temp;
235 	s8 raw_temp;
236 	bool pll_lock_protect;
237 
238 	u8 bw;
239 	bool chan_ext_below;
240 
241 	/* PA mode */
242 	u32 rf_pa_mode[2];
243 
244 	struct mac_stats stats;
245 };
246 
/* TSSI reading paired with the target TX power it was taken at. */
struct mt7601u_tssi_params {
	char tssi0;	/* NOTE(review): plain char signedness is
			 * implementation-defined - looks like a signed
			 * value is expected; confirm against users.
			 */
	int trgt_power;
};
251 
252 struct mt76_wcid {
253 	u8 idx;
254 	u8 hw_key_idx;
255 
256 	u16 tx_rate;
257 	bool tx_rate_set;
258 	u8 tx_rate_nss;
259 };
260 
261 struct mt76_vif {
262 	u8 idx;
263 
264 	struct mt76_wcid group_wcid;
265 };
266 
267 struct mt76_sta {
268 	struct mt76_wcid wcid;
269 	u16 agg_ssn[IEEE80211_NUM_TIDS];
270 };
271 
272 struct mt76_reg_pair {
273 	u32 reg;
274 	u32 value;
275 };
276 
277 struct mt7601u_rxwi;
278 
279 extern const struct ieee80211_ops mt7601u_ops;
280 
281 void mt7601u_init_debugfs(struct mt7601u_dev *dev);
282 
283 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
284 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
285 u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
286 u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
287 void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
288 		     const void *data, int len);
289 
290 int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
291 bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
292 	       int timeout);
293 bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
294 		    int timeout);
295 
296 /* Compatibility with mt76 */
297 #define mt76_rmw_field(_dev, _reg, _field, _val)	\
298 	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
299 
/* mt76-compatible register read; thin wrapper around mt7601u_rr(). */
static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
{
	return mt7601u_rr(dev, offset);
}
304 
/* mt76-compatible register write; thin wrapper around mt7601u_wr().
 * Note: the original "return"ed a void expression, which is not valid
 * strict C - call the function plainly instead.
 */
static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
{
	mt7601u_wr(dev, offset, val);
}
309 
310 static inline u32
mt76_rmw(struct mt7601u_dev * dev,u32 offset,u32 mask,u32 val)311 mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
312 {
313 	return mt7601u_rmw(dev, offset, mask, val);
314 }
315 
/* Set @val bits in register @offset; returns the new register value. */
static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
{
	return mt76_rmw(dev, offset, 0, val);
}
320 
/* Clear @val bits in register @offset; returns the new register value. */
static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
{
	return mt76_rmw(dev, offset, val, 0);
}
325 
326 int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
327 			    const struct mt76_reg_pair *data, int len);
328 int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
329 			     const u32 *data, int n);
330 void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
331 
332 /* Init */
333 struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
334 int mt7601u_init_hardware(struct mt7601u_dev *dev);
335 int mt7601u_register_device(struct mt7601u_dev *dev);
336 void mt7601u_cleanup(struct mt7601u_dev *dev);
337 
338 int mt7601u_mac_start(struct mt7601u_dev *dev);
339 void mt7601u_mac_stop(struct mt7601u_dev *dev);
340 
341 /* PHY */
342 int mt7601u_phy_init(struct mt7601u_dev *dev);
343 int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
344 void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
345 void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
346 int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
347 void mt7601u_agc_save(struct mt7601u_dev *dev);
348 void mt7601u_agc_restore(struct mt7601u_dev *dev);
349 int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
350 			    struct cfg80211_chan_def *chandef);
351 void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
352 int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
353 			 struct mt7601u_rxwi *rxwi, u16 rate);
354 void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
355 			       struct ieee80211_bss_conf *info);
356 
357 /* MAC */
358 void mt7601u_mac_work(struct work_struct *work);
359 void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
360 				int ht_mode);
361 void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
362 void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
363 void
364 mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
365 void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
366 
367 /* TX */
368 void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
369 		struct sk_buff *skb);
370 int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
371 		    u16 queue, const struct ieee80211_tx_queue_params *params);
372 void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
373 void mt7601u_tx_stat(struct work_struct *work);
374 
375 /* util */
376 void mt76_remove_hdr_pad(struct sk_buff *skb);
377 int mt76_insert_hdr_pad(struct sk_buff *skb);
378 
379 u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
380 
mt7601u_mac_set_ctrlch(struct mt7601u_dev * dev,bool below)381 static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
382 {
383 	return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
384 }
385 
386 int mt7601u_dma_init(struct mt7601u_dev *dev);
387 void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
388 
389 int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
390 			   struct mt76_wcid *wcid, int hw_q);
391 
392 #endif
393