1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
3 */
4
5 #include "main.h"
6 #include "mac.h"
7 #include "reg.h"
8 #include "fw.h"
9 #include "debug.h"
10
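/*
 * Illustrative use (a sketch, not taken from the driver's callers): with
 * the RTW_CHANNEL_WIDTH_* and RTW_SC_* definitions assumed from main.h,
 * switching the MAC to channel 36 at 80 MHz with the primary 20 MHz
 * channel on the upmost sub-channel would look roughly like
 *
 *	rtw_set_channel_mac(rtwdev, 36, RTW_CHANNEL_WIDTH_80, RTW_SC_20_UPMOST);
 *
 * which also enables the CCK check below because channel 36 is in the
 * 5 GHz band.
 */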
11 void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
12 u8 primary_ch_idx)
13 {
14 u8 txsc40 = 0, txsc20 = 0;
15 u32 value32;
16 u8 value8;
17
18 txsc20 = primary_ch_idx;
19 if (bw == RTW_CHANNEL_WIDTH_80) {
20 if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
21 txsc40 = RTW_SC_40_UPPER;
22 else
23 txsc40 = RTW_SC_40_LOWER;
24 }
25 rtw_write8(rtwdev, REG_DATA_SC,
26 BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
27
28 value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
29 value32 &= ~BIT_RFMOD;
30 switch (bw) {
31 case RTW_CHANNEL_WIDTH_80:
32 value32 |= BIT_RFMOD_80M;
33 break;
34 case RTW_CHANNEL_WIDTH_40:
35 value32 |= BIT_RFMOD_40M;
36 break;
37 case RTW_CHANNEL_WIDTH_20:
38 default:
39 break;
40 }
41 rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
42
43 if (rtw_chip_wcpu_11n(rtwdev))
44 return;
45
46 value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
47 value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
48 rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
49
50 rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
51 rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
52
53 value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
54 value8 = value8 & ~BIT_CHECK_CCK_EN;
55 if (IS_CH_5G_BAND(channel))
56 value8 |= BIT_CHECK_CCK_EN;
57 rtw_write8(rtwdev, REG_CCK_CHECK, value8);
58 }
59 EXPORT_SYMBOL(rtw_set_channel_mac);
60
61 static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
62 {
63 u32 value32;
64 u8 value8;
65
66 rtw_write8(rtwdev, REG_RSV_CTRL, 0);
67
68 if (rtw_chip_wcpu_11n(rtwdev)) {
69 if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
70 rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
71 else
72 rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
73 return 0;
74 }
75
76 switch (rtw_hci_type(rtwdev)) {
77 case RTW_HCI_TYPE_PCIE:
78 rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
79 break;
80 case RTW_HCI_TYPE_USB:
81 break;
82 default:
83 return -EINVAL;
84 }
85
86 /* config PIN Mux */
87 value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
88 value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
89 rtw_write32(rtwdev, REG_PAD_CTRL1, value32);
90
91 value32 = rtw_read32(rtwdev, REG_LED_CFG);
92 value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
93 rtw_write32(rtwdev, REG_LED_CFG, value32);
94
95 value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
96 value32 |= BIT_WLRFE_4_5_EN;
97 rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);
98
99 /* disable BB/RF */
100 value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
101 value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
102 rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);
103
104 value8 = rtw_read8(rtwdev, REG_RF_CTRL);
105 value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
106 rtw_write8(rtwdev, REG_RF_CTRL, value8);
107
108 value32 = rtw_read32(rtwdev, REG_WLRF1);
109 value32 &= ~BIT_WLRF1_BBRF_EN;
110 rtw_write32(rtwdev, REG_WLRF1, value32);
111
112 return 0;
113 }
114
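/*
 * do_pwr_poll_cmd() polls rtw_read8(rtwdev, addr) every 50 us until
 * (val & mask) == (target & mask), giving up after 50 * RTW_PWR_POLLING_CNT
 * microseconds; it returns true on success and false on timeout.
 */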
115 static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
116 {
117 u32 val;
118
119 target &= mask;
120
121 return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
122 50, 50 * RTW_PWR_POLLING_CNT, false,
123 rtwdev, addr) == 0;
124 }
125
126 static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
127 const struct rtw_pwr_seq_cmd *cmd)
128 {
129 u8 value;
130 u32 offset;
131
132 if (cmd->base == RTW_PWR_ADDR_SDIO)
133 offset = cmd->offset | SDIO_LOCAL_OFFSET;
134 else
135 offset = cmd->offset;
136
137 if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
138 return 0;
139
140 if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
141 goto err;
142
143 /* if PCIE, toggle BIT_PFM_WOWL and try again */
144 value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
145 if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
146 rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
147 rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
148 rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
149 if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
150 rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
151
152 if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
153 return 0;
154
155 err:
156 rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
157 offset, cmd->mask, cmd->value);
158 return -EBUSY;
159 }
160
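/*
 * rtw_sub_pwr_seq_parser() walks a RTW_PWR_CMD_END terminated array of
 * struct rtw_pwr_seq_cmd and applies only the entries whose intf_mask and
 * cut_mask match this device.  A purely illustrative WRITE entry (the
 * field values below are made up, not from any real chip table) could be
 *
 *	{ .offset = 0x0005, .cut_mask = 0xff, .intf_mask = BIT(1) | BIT(2),
 *	  .cmd = RTW_PWR_CMD_WRITE, .mask = BIT(7), .value = 0 },
 *
 * meaning "on USB or PCIe, any cut: clear bit 7 of register 0x0005".
 */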
161 static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
162 u8 cut_mask,
163 const struct rtw_pwr_seq_cmd *cmd)
164 {
165 const struct rtw_pwr_seq_cmd *cur_cmd;
166 u32 offset;
167 u8 value;
168
169 for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
170 if (!(cur_cmd->intf_mask & intf_mask) ||
171 !(cur_cmd->cut_mask & cut_mask))
172 continue;
173
174 switch (cur_cmd->cmd) {
175 case RTW_PWR_CMD_WRITE:
176 offset = cur_cmd->offset;
177
178 if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
179 offset |= SDIO_LOCAL_OFFSET;
180
181 value = rtw_read8(rtwdev, offset);
182 value &= ~cur_cmd->mask;
183 value |= (cur_cmd->value & cur_cmd->mask);
184 rtw_write8(rtwdev, offset, value);
185 break;
186 case RTW_PWR_CMD_POLLING:
187 if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
188 return -EBUSY;
189 break;
190 case RTW_PWR_CMD_DELAY:
191 if (cur_cmd->value == RTW_PWR_DELAY_US)
192 udelay(cur_cmd->offset);
193 else
194 mdelay(cur_cmd->offset);
195 break;
196 case RTW_PWR_CMD_READ:
197 break;
198 default:
199 return -EINVAL;
200 }
201 }
202
203 return 0;
204 }
205
206 static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
207 const struct rtw_pwr_seq_cmd **cmd_seq)
208 {
209 u8 cut_mask;
210 u8 intf_mask;
211 u8 cut;
212 u32 idx = 0;
213 const struct rtw_pwr_seq_cmd *cmd;
214 int ret;
215
216 cut = rtwdev->hal.cut_version;
217 cut_mask = cut_version_to_mask(cut);
218 switch (rtw_hci_type(rtwdev)) {
219 case RTW_HCI_TYPE_PCIE:
220 intf_mask = BIT(2);
221 break;
222 case RTW_HCI_TYPE_USB:
223 intf_mask = BIT(1);
224 break;
225 default:
226 return -EINVAL;
227 }
228
229 do {
230 cmd = cmd_seq[idx];
231 if (!cmd)
232 break;
233
234 ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
235 if (ret)
236 return -EBUSY;
237
238 idx++;
239 } while (1);
240
241 return 0;
242 }
243
244 static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
245 {
246 struct rtw_chip_info *chip = rtwdev->chip;
247 const struct rtw_pwr_seq_cmd **pwr_seq;
248 u8 rpwm;
249 bool cur_pwr;
250
251 if (rtw_chip_wcpu_11ac(rtwdev)) {
252 rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
253
254 /* Check whether the firmware is still present */
255 if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
256 rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
257 rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
258 }
259 }
260
261 if (rtw_read8(rtwdev, REG_CR) == 0xea)
262 cur_pwr = false;
263 else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
264 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
265 cur_pwr = false;
266 else
267 cur_pwr = true;
268
269 if (pwr_on == cur_pwr)
270 return -EALREADY;
271
272 pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
273 if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
274 return -EINVAL;
275
276 return 0;
277 }
278
279 static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
280 {
281 u8 sys_func_en = rtwdev->chip->sys_func_en;
282 u8 value8;
283 u32 value, tmp;
284
285 value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
286 value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
287 rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
288
289 rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
290 value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
291 rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
292
293 /* disable boot-from-flash so the driver can download firmware */
294 tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
295 if (tmp & BIT_BOOT_FSPI_EN) {
296 rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
297 value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
298 rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
299 }
300
301 return 0;
302 }
303
304 static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
305 {
306 rtw_write8(rtwdev, REG_CR, 0xff);
307 mdelay(2);
308 rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
309 mdelay(2);
310
311 rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
312 rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);
313
314 rtw_write16(rtwdev, REG_CR, 0x2ff);
315
316 return 0;
317 }
318
319 static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
320 {
321 if (rtw_chip_wcpu_11n(rtwdev))
322 return __rtw_mac_init_system_cfg_legacy(rtwdev);
323
324 return __rtw_mac_init_system_cfg(rtwdev);
325 }
326
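/*
 * Illustrative bring-up order (a sketch built only from helpers in this
 * file, not a verbatim copy of the core driver's init path):
 *
 *	ret = rtw_mac_power_on(rtwdev);
 *	if (!ret)
 *		ret = rtw_download_firmware(rtwdev, fw);
 *	if (!ret)
 *		ret = rtw_mac_init(rtwdev);
 *
 * Note that rtw_mac_power_on() retries a full power cycle below when the
 * power switch reports -EALREADY.
 */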
327 int rtw_mac_power_on(struct rtw_dev *rtwdev)
328 {
329 int ret = 0;
330
331 ret = rtw_mac_pre_system_cfg(rtwdev);
332 if (ret)
333 goto err;
334
335 ret = rtw_mac_power_switch(rtwdev, true);
336 if (ret == -EALREADY) {
337 rtw_mac_power_switch(rtwdev, false);
338 ret = rtw_mac_power_switch(rtwdev, true);
339 if (ret)
340 goto err;
341 } else if (ret) {
342 goto err;
343 }
344
345 ret = rtw_mac_init_system_cfg(rtwdev);
346 if (ret)
347 goto err;
348
349 return 0;
350
351 err:
352 rtw_err(rtwdev, "mac power on failed\n");
353 return ret;
354 }
355
356 void rtw_mac_power_off(struct rtw_dev *rtwdev)
357 {
358 rtw_mac_power_switch(rtwdev, false);
359 }
360
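/*
 * The expected image size is derived from the firmware header: each
 * downloaded segment carries an appended checksum, so
 *
 *	real_size = FW_HDR_SIZE
 *		  + (dmem_size + FW_HDR_CHKSUM_SIZE)
 *		  + (imem_size + FW_HDR_CHKSUM_SIZE)
 *		  + (emem_size ? emem_size + FW_HDR_CHKSUM_SIZE : 0)
 *
 * where EMEM is only counted when BIT(4) of fw_hdr->mem_usage indicates
 * the image contains one.
 */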
361 static bool check_firmware_size(const u8 *data, u32 size)
362 {
363 const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
364 u32 dmem_size;
365 u32 imem_size;
366 u32 emem_size;
367 u32 real_size;
368
369 dmem_size = le32_to_cpu(fw_hdr->dmem_size);
370 imem_size = le32_to_cpu(fw_hdr->imem_size);
371 emem_size = (fw_hdr->mem_usage & BIT(4)) ?
372 le32_to_cpu(fw_hdr->emem_size) : 0;
373
374 dmem_size += FW_HDR_CHKSUM_SIZE;
375 imem_size += FW_HDR_CHKSUM_SIZE;
376 emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
377 real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
378 if (real_size != size)
379 return false;
380
381 return true;
382 }
383
384 static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
385 {
386 if (enable) {
387 /* cpu io interface enable */
388 rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
389
390 /* cpu enable */
391 rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
392 } else {
393 /* cpu disable */
394 rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
395
396 /* cpu io interface disable */
397 rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
398 }
399 }
400
401 #define DLFW_RESTORE_REG_NUM 6
402
403 static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
404 struct rtw_backup_info *bckp)
405 {
406 u8 tmp;
407 u8 bckp_idx = 0;
408
409 /* set HIQ to hi priority */
410 bckp[bckp_idx].len = 1;
411 bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
412 bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
413 bckp_idx++;
414 tmp = RTW_DMA_MAPPING_HIGH << 6;
415 rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);
416
417 /* DLFW uses only HIQ, map HIQ to hi priority */
418 bckp[bckp_idx].len = 1;
419 bckp[bckp_idx].reg = REG_CR;
420 bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
421 bckp_idx++;
422 bckp[bckp_idx].len = 4;
423 bckp[bckp_idx].reg = REG_H2CQ_CSR;
424 bckp[bckp_idx].val = BIT_H2CQ_FULL;
425 bckp_idx++;
426 tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
427 rtw_write8(rtwdev, REG_CR, tmp);
428 rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
429
430 /* Configure high priority queue and public priority queue page numbers */
431 bckp[bckp_idx].len = 2;
432 bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
433 bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
434 bckp_idx++;
435 bckp[bckp_idx].len = 4;
436 bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
437 bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
438 bckp_idx++;
439 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
440 rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);
441
442 /* Disable beacon related functions */
443 tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
444 bckp[bckp_idx].len = 1;
445 bckp[bckp_idx].reg = REG_BCN_CTRL;
446 bckp[bckp_idx].val = tmp;
447 bckp_idx++;
448 tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
449 rtw_write8(rtwdev, REG_BCN_CTRL, tmp);
450
451 WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
452 }
453
454 static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
455 {
456 rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
457 rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
458 rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
459 rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
460 }
461
462 static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
463 struct rtw_backup_info *bckp,
464 u8 bckp_num)
465 {
466 rtw_restore_reg(rtwdev, bckp, bckp_num);
467 }
468
469 #define TX_DESC_SIZE 48
470
471 static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
472 const u8 *data, u32 size)
473 {
474 u8 *buf;
475 int ret;
476
477 buf = kmemdup(data, size, GFP_KERNEL);
478 if (!buf)
479 return -ENOMEM;
480
481 ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
482 kfree(buf);
483 return ret;
484 }
485
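/*
 * The single byte of padding added below for USB is presumably meant to
 * keep the total bulk-out length (payload plus the TX_DESC_SIZE byte
 * descriptor) from landing exactly on a 512-byte boundary, which would
 * otherwise need a zero-length packet to terminate the transfer.
 */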
486 static int
487 send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
488 {
489 int ret;
490
491 if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
492 !((size + TX_DESC_SIZE) & (512 - 1)))
493 size += 1;
494
495 ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
496 if (ret)
497 rtw_err(rtwdev, "failed to download rsvd page\n");
498
499 return ret;
500 }
501
502 static int
503 iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
504 {
505 rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
506 rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
507 rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);
508
509 if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
510 return -EBUSY;
511
512 return 0;
513 }
514
515 static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
516 u32 len, u8 first)
517 {
518 u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;
519
520 if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
521 return -EBUSY;
522
523 ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
524 if (!first)
525 ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;
526
527 if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
528 return -EBUSY;
529
530 return 0;
531 }
532
533 int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
534 {
535 u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;
536
537 if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
538 rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
539 return -EBUSY;
540 }
541
542 ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;
543
544 if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
545 rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
546 return -EBUSY;
547 }
548
549 return 0;
550 }
551
552 static bool
553 check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
554 {
555 u8 fw_ctrl;
556
557 fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);
558
559 if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
560 if (addr < OCPBASE_DMEM_88XX) {
561 fw_ctrl |= BIT_IMEM_DW_OK;
562 fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
563 rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
564 } else {
565 fw_ctrl |= BIT_DMEM_DW_OK;
566 fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
567 rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
568 }
569
570 rtw_err(rtwdev, "invalid fw checksum\n");
571
572 return false;
573 }
574
575 if (addr < OCPBASE_DMEM_88XX) {
576 fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
577 rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
578 } else {
579 fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
580 rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
581 }
582
583 return true;
584 }
585
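/*
 * download_firmware_to_mem() moves one firmware segment in chunks of at
 * most max_size (0x1000 bytes): each chunk is first written into the
 * reserved-page area of the TX FIFO and then copied by the DDMA engine
 * from OCPBASE_TXBUF_88XX to the target memory address, with the hardware
 * checksum accumulated across chunks and verified once at the end.
 */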
586 static int
587 download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
588 u32 src, u32 dst, u32 size)
589 {
590 struct rtw_chip_info *chip = rtwdev->chip;
591 u32 desc_size = chip->tx_pkt_desc_sz;
592 u8 first_part;
593 u32 mem_offset;
594 u32 residue_size;
595 u32 pkt_size;
596 u32 max_size = 0x1000;
597 u32 val;
598 int ret;
599
600 mem_offset = 0;
601 first_part = 1;
602 residue_size = size;
603
604 val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
605 val |= BIT_DDMACH0_RESET_CHKSUM_STS;
606 rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);
607
608 while (residue_size) {
609 if (residue_size >= max_size)
610 pkt_size = max_size;
611 else
612 pkt_size = residue_size;
613
614 ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
615 data + mem_offset, pkt_size);
616 if (ret)
617 return ret;
618
619 ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
620 src + desc_size,
621 dst + mem_offset, pkt_size,
622 first_part);
623 if (ret)
624 return ret;
625
626 first_part = 0;
627 mem_offset += pkt_size;
628 residue_size -= pkt_size;
629 }
630
631 if (!check_fw_checksum(rtwdev, dst))
632 return -EINVAL;
633
634 return 0;
635 }
636
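/*
 * The firmware image is laid out as FW_HDR_SIZE bytes of header followed
 * by the DMEM, IMEM and (optionally) EMEM segments, each with its trailing
 * checksum, and the segments are downloaded in that order.  The target
 * addresses come from the header with BIT(31) masked off (apparently a
 * flag bit rather than part of the destination address).
 */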
637 static int
638 start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
639 {
640 const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
641 const u8 *cur_fw;
642 u16 val;
643 u32 imem_size;
644 u32 dmem_size;
645 u32 emem_size;
646 u32 addr;
647 int ret;
648
649 dmem_size = le32_to_cpu(fw_hdr->dmem_size);
650 imem_size = le32_to_cpu(fw_hdr->imem_size);
651 emem_size = (fw_hdr->mem_usage & BIT(4)) ?
652 le32_to_cpu(fw_hdr->emem_size) : 0;
653 dmem_size += FW_HDR_CHKSUM_SIZE;
654 imem_size += FW_HDR_CHKSUM_SIZE;
655 emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
656
657 val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
658 val |= BIT_MCUFWDL_EN;
659 rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
660
661 cur_fw = data + FW_HDR_SIZE;
662 addr = le32_to_cpu(fw_hdr->dmem_addr);
663 addr &= ~BIT(31);
664 ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
665 if (ret)
666 return ret;
667
668 cur_fw = data + FW_HDR_SIZE + dmem_size;
669 addr = le32_to_cpu(fw_hdr->imem_addr);
670 addr &= ~BIT(31);
671 ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
672 if (ret)
673 return ret;
674
675 if (emem_size) {
676 cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
677 addr = le32_to_cpu(fw_hdr->emem_addr);
678 addr &= ~BIT(31);
679 ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
680 emem_size);
681 if (ret)
682 return ret;
683 }
684
685 return 0;
686 }
687
688 static int download_firmware_validate(struct rtw_dev *rtwdev)
689 {
690 u32 fw_key;
691
692 if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
693 fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
694 if (fw_key == ILLEGAL_KEY_GROUP)
695 rtw_err(rtwdev, "invalid fw key\n");
696 return -EINVAL;
697 }
698
699 return 0;
700 }
701
702 static void download_firmware_end_flow(struct rtw_dev *rtwdev)
703 {
704 u16 fw_ctrl;
705
706 rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);
707
708 /* Check whether the IMEM & DMEM checksums are OK */
709 fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
710 if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
711 return;
712
713 fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
714 rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
715 }
716
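/*
 * Download flow for the wcpu_11ac parts: sanity-check the image size, save
 * LTE coex register 0x38, halt the WLAN CPU, back up and reconfigure the
 * TX DMA related registers, DDMA the segments into MCU memory, restore the
 * registers, complete the handshake in REG_MCUFW_CTRL, restart the CPU,
 * restore the coex register and finally wait for the firmware ready flag.
 */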
717 static int __rtw_download_firmware(struct rtw_dev *rtwdev,
718 struct rtw_fw_state *fw)
719 {
720 struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
721 const u8 *data = fw->firmware->data;
722 u32 size = fw->firmware->size;
723 u32 ltecoex_bckp;
724 int ret;
725
726 if (!check_firmware_size(data, size))
727 return -EINVAL;
728
729 if (!ltecoex_read_reg(rtwdev, 0x38, <ecoex_bckp))
730 return -EBUSY;
731
732 wlan_cpu_enable(rtwdev, false);
733
734 download_firmware_reg_backup(rtwdev, bckp);
735 download_firmware_reset_platform(rtwdev);
736
737 ret = start_download_firmware(rtwdev, data, size);
738 if (ret)
739 goto dlfw_fail;
740
741 download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);
742
743 download_firmware_end_flow(rtwdev);
744
745 wlan_cpu_enable(rtwdev, true);
746
747 if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
748 return -EBUSY;
749
750 ret = download_firmware_validate(rtwdev);
751 if (ret)
752 goto dlfw_fail;
753
754 /* reset desc and index */
755 rtw_hci_setup(rtwdev);
756
757 rtwdev->h2c.last_box_num = 0;
758 rtwdev->h2c.seq = 0;
759
760 set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
761
762 return 0;
763
764 dlfw_fail:
765 /* Disable FWDL_EN */
766 rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
767 rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
768
769 return ret;
770 }
771
772 static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
773 {
774 int try;
775
776 if (en) {
777 wlan_cpu_enable(rtwdev, false);
778 wlan_cpu_enable(rtwdev, true);
779
780 rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
781
782 for (try = 0; try < 10; try++) {
783 if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
784 goto fwdl_ready;
785 rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
786 msleep(20);
787 }
788 rtw_err(rtwdev, "failed to check fw download ready\n");
789 fwdl_ready:
790 rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
791 } else {
792 rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
793 }
794 }
795
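/*
 * Legacy (wcpu_11n) download path: the image body is pushed through a
 * register window at FW_START_ADDR_LEGACY, one 32-bit write per
 * DLFW_BLK_SIZE_LEGACY block, with the destination page selected via the
 * BIT_ROM_PGE field of REG_MCUFW_CTRL one DLFW_PAGE_SIZE_LEGACY page at a
 * time.
 */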
796 static void
797 write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
798 {
799 u32 val32;
800 u32 block_nr;
801 u32 remain_size;
802 u32 write_addr = FW_START_ADDR_LEGACY;
803 const __le32 *ptr = (const __le32 *)data;
804 u32 block;
805 __le32 remain_data = 0;
806
807 block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
808 remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);
809
810 val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
811 val32 &= ~BIT_ROM_PGE;
812 val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
813 rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
814
815 for (block = 0; block < block_nr; block++) {
816 rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));
817
818 write_addr += DLFW_BLK_SIZE_LEGACY;
819 ptr++;
820 }
821
822 if (remain_size) {
823 memcpy(&remain_data, ptr, remain_size);
824 rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
825 }
826 }
827
828 static int
829 download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
830 {
831 u32 page;
832 u32 total_page;
833 u32 last_page_size;
834
835 data += sizeof(struct rtw_fw_hdr_legacy);
836 size -= sizeof(struct rtw_fw_hdr_legacy);
837
838 total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
839 last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);
840
841 rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
842
843 for (page = 0; page < total_page; page++) {
844 write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
845 data += DLFW_PAGE_SIZE_LEGACY;
846 }
847 if (last_page_size)
848 write_firmware_page(rtwdev, page, data, last_page_size);
849
850 if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
851 rtw_err(rtwdev, "failed to check download firmware report\n");
852 return -EINVAL;
853 }
854
855 return 0;
856 }
857
858 static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
859 {
860 u32 val32;
861 int try;
862
863 val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
864 val32 |= BIT_MCUFWDL_RDY;
865 val32 &= ~BIT_WINTINI_RDY;
866 rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
867
868 wlan_cpu_enable(rtwdev, false);
869 wlan_cpu_enable(rtwdev, true);
870
871 for (try = 0; try < 10; try++) {
872 val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
873 if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
874 return 0;
875 msleep(20);
876 }
877
878 rtw_err(rtwdev, "failed to validate firmware\n");
879 return -EINVAL;
880 }
881
882 static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
883 struct rtw_fw_state *fw)
884 {
885 int ret = 0;
886
887 en_download_firmware_legacy(rtwdev, true);
888 ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
889 en_download_firmware_legacy(rtwdev, false);
890 if (ret)
891 goto out;
892
893 ret = download_firmware_validate_legacy(rtwdev);
894 if (ret)
895 goto out;
896
897 /* reset desc and index */
898 rtw_hci_setup(rtwdev);
899
900 rtwdev->h2c.last_box_num = 0;
901 rtwdev->h2c.seq = 0;
902
903 set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
904
905 out:
906 return ret;
907 }
908
909 int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
910 {
911 if (rtw_chip_wcpu_11n(rtwdev))
912 return __rtw_download_firmware_legacy(rtwdev, fw);
913
914 return __rtw_download_firmware(rtwdev, fw);
915 }
916
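/*
 * Translate a mac80211 AC bitmap into the DMA priority-queue bitmap of the
 * active rqpn mapping.  For example (illustrative only),
 *
 *	get_priority_queues(rtwdev, BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI))
 *
 * returns BIT(rqpn->dma_map_vo) | BIT(rqpn->dma_map_vi).
 */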
917 static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
918 {
919 const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
920 u32 prio_queues = 0;
921
922 if (queues & BIT(IEEE80211_AC_VO))
923 prio_queues |= BIT(rqpn->dma_map_vo);
924 if (queues & BIT(IEEE80211_AC_VI))
925 prio_queues |= BIT(rqpn->dma_map_vi);
926 if (queues & BIT(IEEE80211_AC_BE))
927 prio_queues |= BIT(rqpn->dma_map_be);
928 if (queues & BIT(IEEE80211_AC_BK))
929 prio_queues |= BIT(rqpn->dma_map_bk);
930
931 return prio_queues;
932 }
933
934 static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
935 u32 prio_queue, bool drop)
936 {
937 struct rtw_chip_info *chip = rtwdev->chip;
938 const struct rtw_prioq_addr *addr;
939 bool wsize;
940 u16 avail_page, rsvd_page;
941 int i;
942
943 if (prio_queue >= RTW_DMA_MAPPING_MAX)
944 return;
945
946 addr = &chip->prioq_addrs->prio[prio_queue];
947 wsize = chip->prioq_addrs->wsize;
948
949 /* check for up to 100 ms whether all of the reserved pages are available */
950 for (i = 0; i < 5; i++) {
951 rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
952 rtw_read8(rtwdev, addr->rsvd);
953 avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
954 rtw_read8(rtwdev, addr->avail);
955 if (rsvd_page == avail_page)
956 return;
957
958 msleep(20);
959 }
960
961 /* The priority queue is still not empty here, so only warn.
962 *
963 * Note that flushing the tx queue while carrying a lot of traffic
964 * (e.g. 100 Mbps uplink) can drop some of the packets, and flushing
965 * a full priority queue takes roughly two seconds.
966 */
967 if (!drop)
968 rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
969 }
970
971 static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
972 u32 prio_queues, bool drop)
973 {
974 u32 q;
975
976 for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
977 if (prio_queues & BIT(q))
978 __rtw_mac_flush_prio_queue(rtwdev, q, drop);
979 }
980
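/*
 * Typical use (an illustrative sketch): flushing every hardware queue and
 * allowing pending frames to be dropped would be
 *
 *	rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
 *
 * which maps to flushing all of the priority queues below.
 */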
981 void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
982 {
983 u32 prio_queues = 0;
984
985 /* If all of the hardware queues are requested to be flushed,
986 * or the priority queues are not mapped yet,
987 * flush all of the priority queues
988 */
989 if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
990 prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
991 else
992 prio_queues = get_priority_queues(rtwdev, queues);
993
994 rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
995 }
996
997 static int txdma_queue_mapping(struct rtw_dev *rtwdev)
998 {
999 struct rtw_chip_info *chip = rtwdev->chip;
1000 const struct rtw_rqpn *rqpn = NULL;
1001 u16 txdma_pq_map = 0;
1002
1003 switch (rtw_hci_type(rtwdev)) {
1004 case RTW_HCI_TYPE_PCIE:
1005 rqpn = &chip->rqpn_table[1];
1006 break;
1007 case RTW_HCI_TYPE_USB:
1008 if (rtwdev->hci.bulkout_num == 2)
1009 rqpn = &chip->rqpn_table[2];
1010 else if (rtwdev->hci.bulkout_num == 3)
1011 rqpn = &chip->rqpn_table[3];
1012 else if (rtwdev->hci.bulkout_num == 4)
1013 rqpn = &chip->rqpn_table[4];
1014 else
1015 return -EINVAL;
1016 break;
1017 default:
1018 return -EINVAL;
1019 }
1020
1021 rtwdev->fifo.rqpn = rqpn;
1022 txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
1023 txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
1024 txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
1025 txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
1026 txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
1027 txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
1028 rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);
1029
1030 rtw_write8(rtwdev, REG_CR, 0);
1031 rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
1032 if (rtw_chip_wcpu_11ac(rtwdev))
1033 rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
1034
1035 return 0;
1036 }
1037
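/*
 * The reserved pages are carved from the top of the TX FIFO downward; on
 * the wcpu_11ac parts the order is CSI buffer, FW TX buffer, CPU
 * instruction, H2C queue, H2C static info, H2C extra info and finally the
 * driver's own rsvd_drv_pg_num pages, whose start must land exactly on
 * rsvd_boundary.  Counts are in TX FIFO pages (txff_size >> 7, i.e.
 * 128-byte pages).
 */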
1038 static int set_trx_fifo_info(struct rtw_dev *rtwdev)
1039 {
1040 struct rtw_fifo_conf *fifo = &rtwdev->fifo;
1041 struct rtw_chip_info *chip = rtwdev->chip;
1042 u16 cur_pg_addr;
1043 u8 csi_buf_pg_num = chip->csi_buf_pg_num;
1044
1045 /* config rsvd page num */
1046 fifo->rsvd_drv_pg_num = 8;
1047 fifo->txff_pg_num = chip->txff_size >> 7;
1048 if (rtw_chip_wcpu_11n(rtwdev))
1049 fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
1050 else
1051 fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
1052 RSVD_PG_H2C_EXTRAINFO_NUM +
1053 RSVD_PG_H2C_STATICINFO_NUM +
1054 RSVD_PG_H2CQ_NUM +
1055 RSVD_PG_CPU_INSTRUCTION_NUM +
1056 RSVD_PG_FW_TXBUF_NUM +
1057 csi_buf_pg_num;
1058
1059 if (fifo->rsvd_pg_num > fifo->txff_pg_num)
1060 return -ENOMEM;
1061
1062 fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
1063 fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
1064
1065 cur_pg_addr = fifo->txff_pg_num;
1066 if (rtw_chip_wcpu_11ac(rtwdev)) {
1067 cur_pg_addr -= csi_buf_pg_num;
1068 fifo->rsvd_csibuf_addr = cur_pg_addr;
1069 cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
1070 fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
1071 cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
1072 fifo->rsvd_cpu_instr_addr = cur_pg_addr;
1073 cur_pg_addr -= RSVD_PG_H2CQ_NUM;
1074 fifo->rsvd_h2cq_addr = cur_pg_addr;
1075 cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
1076 fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
1077 cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
1078 fifo->rsvd_h2c_info_addr = cur_pg_addr;
1079 }
1080 cur_pg_addr -= fifo->rsvd_drv_pg_num;
1081 fifo->rsvd_drv_addr = cur_pg_addr;
1082
1083 if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
1084 rtw_err(rtwdev, "wrong rsvd driver address\n");
1085 return -EINVAL;
1086 }
1087
1088 return 0;
1089 }
1090
1091 static int __priority_queue_cfg(struct rtw_dev *rtwdev,
1092 const struct rtw_page_table *pg_tbl,
1093 u16 pubq_num)
1094 {
1095 struct rtw_fifo_conf *fifo = &rtwdev->fifo;
1096 struct rtw_chip_info *chip = rtwdev->chip;
1097
1098 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
1099 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
1100 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
1101 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
1102 rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
1103 rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
1104
1105 rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
1106 rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
1107
1108 rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
1109 rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
1110 rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
1111 rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
1112 rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
1113
1114 if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
1115 return -EBUSY;
1116
1117 rtw_write8(rtwdev, REG_CR + 3, 0);
1118
1119 return 0;
1120 }
1121
1122 static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
1123 const struct rtw_page_table *pg_tbl,
1124 u16 pubq_num)
1125 {
1126 struct rtw_fifo_conf *fifo = &rtwdev->fifo;
1127 struct rtw_chip_info *chip = rtwdev->chip;
1128 u32 val32;
1129
1130 val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
1131 rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
1132 val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
1133 rtw_write32(rtwdev, REG_RQPN, val32);
1134
1135 rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
1136 rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
1137 rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
1138 rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
1139 rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
1140 rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);
1141
1142 rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);
1143
1144 if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
1145 return -EBUSY;
1146
1147 return 0;
1148 }
1149
1150 static int priority_queue_cfg(struct rtw_dev *rtwdev)
1151 {
1152 struct rtw_fifo_conf *fifo = &rtwdev->fifo;
1153 struct rtw_chip_info *chip = rtwdev->chip;
1154 const struct rtw_page_table *pg_tbl = NULL;
1155 u16 pubq_num;
1156 int ret;
1157
1158 ret = set_trx_fifo_info(rtwdev);
1159 if (ret)
1160 return ret;
1161
1162 switch (rtw_hci_type(rtwdev)) {
1163 case RTW_HCI_TYPE_PCIE:
1164 pg_tbl = &chip->page_table[1];
1165 break;
1166 case RTW_HCI_TYPE_USB:
1167 if (rtwdev->hci.bulkout_num == 2)
1168 pg_tbl = &chip->page_table[2];
1169 else if (rtwdev->hci.bulkout_num == 3)
1170 pg_tbl = &chip->page_table[3];
1171 else if (rtwdev->hci.bulkout_num == 4)
1172 pg_tbl = &chip->page_table[4];
1173 else
1174 return -EINVAL;
1175 break;
1176 default:
1177 return -EINVAL;
1178 }
1179
1180 pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
1181 pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
1182 if (rtw_chip_wcpu_11n(rtwdev))
1183 return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
1184 else
1185 return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
1186 }
1187
1188 static int init_h2c(struct rtw_dev *rtwdev)
1189 {
1190 struct rtw_fifo_conf *fifo = &rtwdev->fifo;
1191 u8 value8;
1192 u32 value32;
1193 u32 h2cq_addr;
1194 u32 h2cq_size;
1195 u32 h2cq_free;
1196 u32 wp, rp;
1197
1198 if (rtw_chip_wcpu_11n(rtwdev))
1199 return 0;
1200
1201 h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
1202 h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
1203
1204 value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
1205 value32 = (value32 & 0xFFFC0000) | h2cq_addr;
1206 rtw_write32(rtwdev, REG_H2C_HEAD, value32);
1207
1208 value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
1209 value32 = (value32 & 0xFFFC0000) | h2cq_addr;
1210 rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);
1211
1212 value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
1213 value32 &= 0xFFFC0000;
1214 value32 |= (h2cq_addr + h2cq_size);
1215 rtw_write32(rtwdev, REG_H2C_TAIL, value32);
1216
1217 value8 = rtw_read8(rtwdev, REG_H2C_INFO);
1218 value8 = (u8)((value8 & 0xFC) | 0x01);
1219 rtw_write8(rtwdev, REG_H2C_INFO, value8);
1220
1221 value8 = rtw_read8(rtwdev, REG_H2C_INFO);
1222 value8 = (u8)((value8 & 0xFB) | 0x04);
1223 rtw_write8(rtwdev, REG_H2C_INFO, value8);
1224
1225 value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
1226 value8 = (u8)((value8 & 0x7f) | 0x80);
1227 rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);
1228
1229 wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
1230 rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
1231 h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;
1232
1233 if (h2cq_size != h2cq_free) {
1234 rtw_err(rtwdev, "H2C queue mismatch\n");
1235 return -EINVAL;
1236 }
1237
1238 return 0;
1239 }
1240
1241 static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
1242 {
1243 int ret;
1244
1245 ret = txdma_queue_mapping(rtwdev);
1246 if (ret)
1247 return ret;
1248
1249 ret = priority_queue_cfg(rtwdev);
1250 if (ret)
1251 return ret;
1252
1253 ret = init_h2c(rtwdev);
1254 if (ret)
1255 return ret;
1256
1257 return 0;
1258 }
1259
1260 static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
1261 {
1262 u8 value8;
1263
1264 rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
1265 if (rtw_chip_wcpu_11ac(rtwdev)) {
1266 value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
1267 value8 &= 0xF0;
1268 /* workaround for the rxdesc len = 0 issue */
1269 value8 |= 0xF;
1270 rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
1271 }
1272 rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
1273 rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
1274
1275 return 0;
1276 }
1277
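/*
 * rtw_mac_init() is intended to run after rtw_mac_power_on() and the
 * firmware download: it sets up the TX/RX path (DMA queue mapping,
 * priority-queue pages, H2C queue), runs the chip-specific mac_init hook,
 * programs the PHY status driver-info size and applies any HCI-specific
 * interface configuration.
 */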
1278 int rtw_mac_init(struct rtw_dev *rtwdev)
1279 {
1280 struct rtw_chip_info *chip = rtwdev->chip;
1281 int ret;
1282
1283 ret = rtw_init_trx_cfg(rtwdev);
1284 if (ret)
1285 return ret;
1286
1287 ret = chip->ops->mac_init(rtwdev);
1288 if (ret)
1289 return ret;
1290
1291 ret = rtw_drv_info_cfg(rtwdev);
1292 if (ret)
1293 return ret;
1294
1295 rtw_hci_interface_cfg(rtwdev);
1296
1297 return 0;
1298 }
1299