// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2022 Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_rfk_table.h"
#include "rtw8852b_table.h"

#define RTW8852B_RXDCK_VER 0x1
#define RTW8852B_IQK_VER 0x2a
#define RTW8852B_IQK_SS 2
#define RTW8852B_RXK_GROUP_NR 4
#define RTW8852B_TSSI_PATH_NR 2
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
#define RTW8852B_DPK_KIP_REG_NUM 2

#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000

enum rtw8852b_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};

enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};

enum rtw8852b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};

static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};

static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
static const u32 rtw8852b_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)

static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x5864, 0x18000000, 0x3},
	{0x7864, 0x18000000, 0x3},
	{0x12b8, 0x40000000, 0x1},
	{0x32b8, 0x40000000, 0x1},
	{0x030c, 0xff000000, 0x13},
	{0x032c, 0xffff0000, 0x0041},
	{0x12b8, 0x10000000, 0x1},
	{0x58c8, 0x01000000, 0x1},
	{0x78c8, 0x01000000, 0x1},
	{0x5864, 0xc0000000, 0x3},
	{0x7864, 0xc0000000, 0x3},
	{0x2008, 0x01ffffff, 0x1ffffff},
	{0x0c1c, 0x00000004, 0x1},
	{0x0700, 0x08000000, 0x1},
	{0x0c70, 0x000003ff, 0x3ff},
	{0x0c60, 0x00000003, 0x3},
	{0x0c6c, 0x00000001, 0x1},
	{0x58ac, 0x08000000, 0x1},
	{0x78ac, 0x08000000, 0x1},
	{0x0c3c, 0x00000200, 0x1},
	{0x2344, 0x80000000, 0x1},
	{0x4490, 0x80000000, 0x1},
	{0x12a0, 0x00007000, 0x7},
	{0x12a0, 0x00008000, 0x1},
	{0x12a0, 0x00070000, 0x3},
	{0x12a0, 0x00080000, 0x1},
	{0x32a0, 0x00070000, 0x3},
	{0x32a0, 0x00080000, 0x1},
	{0x0700, 0x01000000, 0x1},
	{0x0700, 0x06000000, 0x2},
	{0x20fc, 0xffff0000, 0x3333},
};

static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x12b8, 0x40000000, 0x0},
	{0x32b8, 0x40000000, 0x0},
	{0x5864, 0xc0000000, 0x0},
	{0x7864, 0xc0000000, 0x0},
	{0x2008, 0x01ffffff, 0x0000000},
	{0x0c1c, 0x00000004, 0x0},
	{0x0700, 0x08000000, 0x0},
	{0x0c70, 0x0000001f, 0x03},
	{0x0c70, 0x000003e0, 0x03},
	{0x12a0, 0x000ff000, 0x00},
	{0x32a0, 0x000ff000, 0x00},
	{0x0700, 0x07000000, 0x0},
	{0x20fc, 0xffff0000, 0x0000},
	{0x58c8, 0x01000000, 0x0},
	{0x78c8, 0x01000000, 0x0},
	{0x0c3c, 0x00000200, 0x0},
	{0x2344, 0x80000000, 0x0},
};

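/* Helpers to save and restore the BB and RF registers that the
 * calibration flows below clobber; the register lists are
 * rtw8852b_backup_bb_regs and rtw8852b_backup_rf_regs.
 */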
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		backup_bb_reg_val[i] =
			rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
					      MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		backup_rf_reg_val[i] =
			rtw89_read_rf(rtwdev, rf_path,
				      rtw8852b_backup_rf_regs[i], RFREG_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
				const u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
				       MASKDWORD, backup_bb_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
				const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i],
			       RFREG_MASK, backup_rf_reg_val[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
}

static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
}

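/* Poll the NCTL report (0xbff8) until the one-shot command signals
 * completion (0x55), then read the failure flag from R_NCTL_RPT.
 * Returns true on failure or timeout, false on success.
 */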
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail = true;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(200);

	if (!ret)
		fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);

	return fail;
}

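/* Map a PHY index to the RF path bitmap to calibrate: both paths
 * (RF_AB) unless DBCC is enabled, in which case PHY0 owns path A and
 * PHY1 owns path B.
 */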
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
		    rtwdev->dbcc_en, phy_idx);

	if (!rtwdev->dbcc_en) {
		val = RF_AB;
	} else {
		if (phy_idx == RTW89_PHY_0)
			val = RF_A;
		else
			val = RF_B;
	}
	return val;
}

static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}

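/* RX DCK (RX DC offset calibration): run _set_rx_dck() on each path
 * with the RF forced into RX mode, toggling the TSSI tracking control
 * around the sequence and restoring RR_DCK_FINE and RR_RSV1 afterwards.
 */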
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852B_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}

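/* RCK (RC calibration): trigger the calibration via RR_RCKC, poll
 * RR_RCKS for completion, then latch the resulting CA code back into
 * RR_RCKC.
 */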
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);

	rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
}

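/* DRCK (D-die RC calibration): kick B_DRCK_V1_KICK, wait for
 * B_DRCK_RS_DONE, then copy the hardware result into the manual
 * override field of R_DRCK_V1.
 */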
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}

static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_addc_defs_a_tbl,
				 &rtw8852b_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

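/* ADDCK (ADC DC offset calibration): run the S0 and S1 sequences in
 * turn, logging the residual DC before and after via _check_addc().
 */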
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_en_defs_a_tbl,
				 &rtw8852b_check_dadc_en_defs_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_dis_defs_a_tbl,
				 &rtw8852b_check_dadc_dis_defs_b_tbl);
}

static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}

static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_check_dadc(rtwdev, RF_PATH_B);
	_dack_backup_s1(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}

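/* Top-level DACK flow: AFE init, D-die RCK, ADDCK (with backup and
 * reload of the results), then per-path DAC calibration, with RR_MOD
 * and RR_RSV1 saved and restored around the whole sequence.
 */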
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	default:
		break;
	}
}

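/* Issue a one-shot calibration command to the NCTL engine and wait for
 * it to finish. The ktype selects the command word; returns true if
 * the calibration reported failure.
 */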
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return fail;
}

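/* Wideband RX IQK: sweep the RTW8852B_RXK_GROUP_NR RX gain groups,
 * programming the per-group gain/attenuation from the _g_*/_a_* tables
 * and running one-shot RXK for each. Any failing group makes the whole
 * path fall back to the narrow-band RX CFIR default (0x40000002).
 */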
static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
				       _g_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
				       _g_idxattc1[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
				       _a_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
				       _a_idxattc1[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
		kfail |= fail;
	}
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (kfail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x0);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x5);
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return kfail;
}

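/* Narrow-band RX IQK: a single one-shot NBRXK on gain group 3 only; on
 * success the resulting RX CFIR is captured for the later restore.
 */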
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		       u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const u8 gp = 0x3;
	bool kfail = false;
	bool fail;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
			       _g_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
			       _g_idxattc1[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
			       _a_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
			       _a_idxattc1[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	udelay(1);

	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	kfail |= fail;
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (!kfail)
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	return kfail;
}

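/* Prepare the ADC/RX clocks for RX IQK; the divider (B_P0/P1_RXCK_VAL)
 * and B_UPD_CLK_ADC_VAL settings differ for 80 MHz versus narrower
 * bandwidths.
 */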
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0);
	}
}

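/* Wideband TX IQK: sweep the TX gain groups (the count reuses
 * RTW8852B_RXK_GROUP_NR) with per-band power/track range and BB gain
 * from the _g_*/_a_* tables, running one-shot TXK per group; failures
 * fall back to the narrow-band TX CFIR default.
 */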
static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _a_itqt[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(8 + gp + path * 4), fail);
		kfail |= fail;
	}

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x5);
		iqk_info->is_wb_txiqk[path] = true;
	}

	return kfail;
}

static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail;
	u8 gp = 0x2;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _a_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	if (!kfail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	return kfail;
}

static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
}

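/* Check the LOK result: read back the TX mixer core and vbuffer I/Q
 * DAC codes and treat values pinned near either end of their range as
 * a failed calibration.
 */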
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool is_fail1, is_fail2;
	u32 vbuff_i;
	u32 vbuff_q;
	u32 core_i;
	u32 core_q;
	u32 tmp;
	u8 ch;

	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	core_i = FIELD_GET(RR_TXMO_COI, tmp);
	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
	ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;

	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail1 = true;
	else
		is_fail1 = false;

	iqk_info->lok_idac[ch][path] = tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
	vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
	vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);

	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
		is_fail2 = true;
	else
		is_fail2 = false;

	iqk_info->lok_vbuf[ch][path] = tmp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_idac[ch][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_vbuf[ch][path]);

	return is_fail1 | is_fail2;
}

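/* LOK (LO leakage calibration): coarse and fine FLOK passes, each
 * followed by a vbuffer pass, with the TX gain reprogrammed between
 * steps; the final result is judged by _lok_finetune_check().
 */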
static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		break;
	default:
		break;
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	_iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	return _lok_finetune_check(rtwdev, path);
}

static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	default:
		break;
	}
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
}

static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}

static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	const int try = 3;
	u8 ibias = 0x1;
	u8 i;

	_iqk_txclk_setting(rtwdev, path);

	/* LOK */
	for (i = 0; i < try; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}

	if (lok_is_fail)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);

	/* TXK */
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	/* RX */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}

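/* Record the current band/bandwidth/channel for this path and pick an
 * IQK table index: reuse an empty MCC slot if one exists, otherwise
 * toggle between the two table entries.
 */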
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 reg_rf18;
	u32 reg_35c;
	u8 idx;
	u8 get_empty_table = false;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
			get_empty_table = true;
			break;
		}
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);

	if (!get_empty_table) {
		idx = iqk_info->iqk_table_idx[path] + 1;
		if (idx > 1)
			idx = 0;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);

	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
		    path, reg_rf18, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
		    path, reg_rf18);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);

	if (reg_35c == 0x01)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path,
		    iqk_info->syn1to2);

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
	/* 2GHz/5GHz/6GHz = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
			       iqk_info->iqk_band[path]);
	/* 20/40/80 = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
			       iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
			       iqk_info->iqk_ch[path]);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}

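/* Write the narrow-band TX/RX CFIR results back, kick the restore
 * command through NCTL, and return the RF path to its normal state.
 */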
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));
	fail = _iqk_check_cal(rtwdev, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result =%x\n", __func__, fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}

static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
		def = rtw8852b_restore_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx;

	idx = iqk_info->iqk_table_idx[path];
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
}

static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
		def = rtw8852b_set_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx, path;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		iqk_info->iqk_channel[idx] = 0x0;
		for (path = 0; path < RTW8852B_IQK_SS; path++) {
			iqk_info->lok_cor_fail[idx][path] = false;
			iqk_info->lok_fin_fail[idx][path] = false;
			iqk_info->iqk_tx_fail[idx][path] = false;
			iqk_info->iqk_rx_fail[idx][path] = false;
			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}

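/* Poll every path in @kpath until its RF mode register leaves TX mode
 * (RR_MOD == 2) or the 5 ms timeout expires, so calibration does not
 * start while a transmission is still draining.
 */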
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}

static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (!is_pause)
		return;

	_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}

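/* Run the complete IQK flow for one path: notify BTC of the one-shot
 * calibration, back up BB/RF registers, switch MAC/BB into the
 * calibration configuration, run the per-path IQK, then restore all
 * state and send the BTC stop notification.
 */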
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852B_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	default:
		break;
	}
}

static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		reg_bkup[path][i] =
			rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD,
				       reg_bkup[path][i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

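/* Read back the MDPD order programmed in R_LDL_NORM and fold it into the
 * 0x3 >> order encoding that _dpk_onoff() and _dpk_fill_result() write
 * next to the DPD enable bits.
 */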
static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
{
	u8 order;
	u8 val;

	order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
	val = 0x3 >> order;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);

	return val;
}

static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}

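/* Issue one DPK sub-command to the KIP engine and busy-wait for the NCTL
 * handshake: 0xbff8 reads 0x55 when the one-shot completes, after which
 * 0x80fc is polled for the 0x8000 done pattern before the status is
 * cleared.
 */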
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (id << 8) | (0x19 + (path << 4));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 20000, false,
				       rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");

	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       1, 2000, false,
				       rtwdev, 0x80fc, MASKLWORD);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 2ms!!!!\n");

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" :
		    id == 0x14 ? "PWR_CAL" :
		    id == 0x15 ? "DPK_RXAGC" :
		    id == 0x16 ? "KIP_PRESET" :
		    id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
		    dpk_cmd);
}

static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path);
}

static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}

static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}

static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
	}
}

static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}

static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}

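/* Loopback RX IQK used during DPK: switch the RF into loopback mode,
 * choose a TX attenuation setting that matches the current RXBB gain,
 * fire the LBK_RXIQK one-shot, then undo the loopback configuration.
 */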
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;
	u32 tmp;

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
	udelay(70);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}

static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}

static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}

static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	if (is_bypass) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS2, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS, 0x1);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	} else {
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	}
}

static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
	else
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}

static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 val;

	val = 0x80 + kidx * 0x20 + gain * 0x10;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, val);
}

static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}

static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path, kidx);
}

static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}

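/* Map a measured digital gain onto a signed RXBB adjustment. bnd[] holds
 * descending dgain boundaries: readings at or above bnd[0] map to +6,
 * readings below bnd[14] map to -8, and the 0xf8..0xff values in between
 * are s8 two's-complement negatives.
 */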
static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
{
	static const u16 bnd[15] = {
		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
	};
	s8 offset;

	if (dgain >= bnd[0])
		offset = 0x6;
	else if (bnd[0] > dgain && dgain >= bnd[1])
		offset = 0x6;
	else if (bnd[1] > dgain && dgain >= bnd[2])
		offset = 0x5;
	else if (bnd[2] > dgain && dgain >= bnd[3])
		offset = 0x4;
	else if (bnd[3] > dgain && dgain >= bnd[4])
		offset = 0x3;
	else if (bnd[4] > dgain && dgain >= bnd[5])
		offset = 0x2;
	else if (bnd[5] > dgain && dgain >= bnd[6])
		offset = 0x1;
	else if (bnd[6] > dgain && dgain >= bnd[7])
		offset = 0x0;
	else if (bnd[7] > dgain && dgain >= bnd[8])
		offset = 0xff;
	else if (bnd[8] > dgain && dgain >= bnd[9])
		offset = 0xfe;
	else if (bnd[9] > dgain && dgain >= bnd[10])
		offset = 0xfd;
	else if (bnd[10] > dgain && dgain >= bnd[11])
		offset = 0xfc;
	else if (bnd[11] > dgain && dgain >= bnd[12])
		offset = 0xfb;
	else if (bnd[12] > dgain && dgain >= bnd[13])
		offset = 0xfa;
	else if (bnd[13] > dgain && dgain >= bnd[14])
		offset = 0xf9;
	else if (bnd[14] > dgain)
		offset = 0xf8;
	else
		offset = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);

	return offset;
}

static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}

static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}

static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}

static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}

static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}

static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	u32 tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
}

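/* Step the RF TX AGC down by gain_offset, clamping the result to the
 * DPK_TXAGC_LOWER..DPK_TXAGC_UPPER window, and program it through the
 * DPK_TXAGC one-shot.
 */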
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, s8 gain_offset)
{
	u8 txagc;

	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if (txagc - gain_offset < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);
	return txagc;
}

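/* Inspect the PA scan report. With is_check set, compare the squared
 * magnitude of the first (0x00) and last (0x1f) samples; a power ratio
 * of at least 8/5 flags a gain anomaly. Otherwise dump all 32 samples
 * for debugging (in which case the final comparison sees only zeroes).
 */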
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}

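/* DPK automatic gain control, written as a small state machine: sync and
 * read the digital gain, adjust RXBB when the loop gain is off, measure
 * the gain-loss index, then walk TX AGC up or down until the index lands
 * inside the usable window or the retry budget (agc_cnt/limit) runs out.
 */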
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
	u16 dgain = 0;
	s8 offset;
	int limit = 200;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = DPK_TXAGC_INVAL;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			if (loss_only || limited_rxbb)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
						 RFREG_MASKRXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
				       tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
			if (offset || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_LOWER) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_UPPER) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}

static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	switch (order) {
	case 0:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		break;
	case 1:
	case 2:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}

static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}

static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const u16 pwsf = 0x78;
	u8 gs = dpk->dpk_gs[phy];

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
			       B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}

static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}

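/* Calibrate one path end to end: power the KIP, set an initial TX AGC,
 * configure RF and RX DCK, preset the KIP, then run AGC to find a usable
 * TX gain. On success, capture the thermal reference, run the MDPK IDL
 * one-shot and commit the result. Returns true on failure.
 */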
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);

	_rfk_rf_direct_cntrl(rtwdev, path, false);
	_rfk_drf_direct_cntrl(rtwdev, path, false);

	_dpk_kip_pwr_clk_on(rtwdev, path);
	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);

	_dpk_kip_preset(rtwdev, phy, path, kidx);
	_dpk_kip_set_rxagc(rtwdev, phy, path);
	_dpk_table_select(rtwdev, path, kidx, gain);

	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);

	if (txagc == DPK_TXAGC_INVAL) {
		is_fail = true;
	} else {
		_dpk_get_thermal(rtwdev, kidx, path);

		_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

		_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
	}

	dpk->bp[path][kidx].path_ok = !is_fail;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}

static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
	u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
	u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
	u8 path;

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			if (!reloaded[path] && dpk->bp[path][0].ch)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}

static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_fem_info *fem = &rtwdev->fem;

	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
		return true;
	}

	return false;
}

static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, kpath;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		if (kpath & BIT(path))
			_dpk_onoff(rtwdev, path, true);
	}
}

static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852B_DPK_VER, rtwdev->hal.cv,
		    RTW8852B_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, RF_AB);
}

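/* Thermal tracking for DPK: compare the current thermal reading against
 * the value captured at calibration time, scale the delta per band, and
 * rewrite the pwsf (power scaling factor) boundaries unless tracking is
 * disabled or the path carries no TX AGC.
 */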
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 0x0000003f);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}

		} else {
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}

static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 tx_scale, ofdm_bkof, path, kpath;

	kpath = _kpath(rtwdev, phy);

	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);

	if (ofdm_bkof + tx_scale >= 44) {
		/* move dpd backoff to bb, and set dpd backoff to 0 */
		dpk->dpk_gs[phy] = 0x7f;
		for (path = 0; path < RF_PATH_NUM_8852B; path++) {
			if (!(kpath & BIT(path)))
				continue;

			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
					       B_DPD_CFG, 0x7f7f7f);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
		}
	} else {
		dpk->dpk_gs[phy] = 0x5b;
	}
}

static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (band == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}

static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_a_defs_2g_tbl,
					 &rtw8852b_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_b_defs_2g_tbl,
					 &rtw8852b_tssi_sys_b_defs_5g_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_defs_b_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
}

static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_dck_defs_a_tbl,
				 &rtw8852b_tssi_dck_defs_b_tbl);
}

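/* Program the 64-entry thermal-offset LUT used for TSSI compensation on
 * the given path. Entries 0..31 take negated "down" swing deltas and
 * entries 63..32 take "up" deltas, packed four s8 values per 32-bit
 * write; a cached thermal of 0xff (no calibration data) zeroes the LUT
 * and anchors the thermal meter at code 32.
 */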
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852B_TSSI_GET_VAL
}
2936
_tssi_set_dac_gain_tbl(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2937 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2938 enum rtw89_rf_path path)
2939 {
2940 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2941 &rtw8852b_tssi_dac_gain_defs_a_tbl,
2942 &rtw8852b_tssi_dac_gain_defs_b_tbl);
2943 }
2944
_tssi_slope_cal_org(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2945 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2946 enum rtw89_rf_path path)
2947 {
2948 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2949 enum rtw89_band band = chan->band_type;
2950
2951 if (path == RF_PATH_A)
2952 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2953 &rtw8852b_tssi_slope_a_defs_2g_tbl,
2954 &rtw8852b_tssi_slope_a_defs_5g_tbl);
2955 else
2956 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2957 &rtw8852b_tssi_slope_b_defs_2g_tbl,
2958 &rtw8852b_tssi_slope_b_defs_5g_tbl);
2959 }
2960
_tssi_alignment_default(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,bool all)2961 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2962 enum rtw89_rf_path path, bool all)
2963 {
2964 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2965 enum rtw89_band band = chan->band_type;
2966 const struct rtw89_rfk_tbl *tbl = NULL;
2967 u8 ch = chan->channel;
2968
2969 if (path == RF_PATH_A) {
2970 if (band == RTW89_BAND_2G) {
2971 if (all)
2972 tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
2973 else
2974 tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
2975 } else if (ch >= 36 && ch <= 64) {
2976 if (all)
2977 tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
2978 else
2979 tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
2980 } else if (ch >= 100 && ch <= 144) {
2981 if (all)
2982 tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
2983 else
2984 tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
2985 } else if (ch >= 149 && ch <= 177) {
2986 if (all)
2987 tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
2988 else
2989 tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
2990 }
2991 } else {
2992 if (ch >= 1 && ch <= 14) {
2993 if (all)
2994 tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
2995 else
2996 tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
2997 } else if (ch >= 36 && ch <= 64) {
2998 if (all)
2999 tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
3000 else
3001 tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
3002 } else if (ch >= 100 && ch <= 144) {
3003 if (all)
3004 tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
3005 else
3006 tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
3007 } else if (ch >= 149 && ch <= 177) {
3008 if (all)
3009 tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
3010 else
3011 tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
3012 }
3013 }
3014
3015 if (tbl)
3016 rtw89_rfk_parser(rtwdev, tbl);
3017 }
3018
_tssi_set_tssi_slope(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3019 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3020 enum rtw89_rf_path path)
3021 {
3022 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3023 &rtw8852b_tssi_slope_defs_a_tbl,
3024 &rtw8852b_tssi_slope_defs_b_tbl);
3025 }
3026
_tssi_set_tssi_track(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3027 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3028 enum rtw89_rf_path path)
3029 {
3030 if (path == RF_PATH_A)
3031 rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3032 else
3033 rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3034 }
3035
_tssi_set_txagc_offset_mv_avg(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3036 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3037 enum rtw89_phy_idx phy,
3038 enum rtw89_rf_path path)
3039 {
3040 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
3041 path);
3042
3043 if (path == RF_PATH_A)
3044 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
3045 else
3046 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
3047 }
3048
_tssi_enable(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)3049 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3050 {
3051 u8 i;
3052
3053 for (i = 0; i < RF_PATH_NUM_8852B; i++) {
3054 _tssi_set_tssi_track(rtwdev, phy, i);
3055 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3056
3057 if (i == RF_PATH_A) {
3058 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
3059 B_P0_TSSI_MV_CLR, 0x0);
3060 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
3061 B_P0_TSSI_EN, 0x0);
3062 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
3063 B_P0_TSSI_EN, 0x1);
3064 rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
3065 RR_TXGA_V1_TRK_EN, 0x1);
3066 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3067 B_P0_TSSI_RFC, 0x3);
3068
3069 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3070 B_P0_TSSI_OFT, 0xc0);
3071 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3072 B_P0_TSSI_OFT_EN, 0x0);
3073 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3074 B_P0_TSSI_OFT_EN, 0x1);
3075
3076 rtwdev->is_tssi_mode[RF_PATH_A] = true;
3077 } else {
3078 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
3079 B_P1_TSSI_MV_CLR, 0x0);
3080 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
3081 B_P1_TSSI_EN, 0x0);
3082 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
3083 B_P1_TSSI_EN, 0x1);
3084 rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
3085 RR_TXGA_V1_TRK_EN, 0x1);
3086 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3087 B_P1_TSSI_RFC, 0x3);
3088
3089 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3090 B_P1_TSSI_OFT, 0xc0);
3091 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3092 B_P1_TSSI_OFT_EN, 0x0);
3093 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3094 B_P1_TSSI_OFT_EN, 0x1);
3095
3096 rtwdev->is_tssi_mode[RF_PATH_B] = true;
3097 }
3098 }
3099 }
3100
3101 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3102 {
3103 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
3104 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
3105 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
3106 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
3107 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
3108 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
3109
3110 rtwdev->is_tssi_mode[RF_PATH_A] = false;
3111 rtwdev->is_tssi_mode[RF_PATH_B] = false;
3112 }
3113
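/* Map a 2 GHz channel to one of six CCK DE groups (ch 1-14). */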
3114 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3115 {
3116 switch (ch) {
3117 case 1 ... 2:
3118 return 0;
3119 case 3 ... 5:
3120 return 1;
3121 case 6 ... 8:
3122 return 2;
3123 case 9 ... 11:
3124 return 3;
3125 case 12 ... 13:
3126 return 4;
3127 case 14:
3128 return 5;
3129 }
3130
3131 return 0;
3132 }
3133
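/* A channel that falls between two calibrated OFDM groups is tagged with
 * BIT(31); its DE is then taken as the average of groups idx and idx + 1
 * (see _tssi_get_ofdm_de() below).
 */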
3134 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3135 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3136 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3137 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3138 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3139
3140 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3141 {
3142 switch (ch) {
3143 case 1 ... 2:
3144 return 0;
3145 case 3 ... 5:
3146 return 1;
3147 case 6 ... 8:
3148 return 2;
3149 case 9 ... 11:
3150 return 3;
3151 case 12 ... 14:
3152 return 4;
3153 case 36 ... 40:
3154 return 5;
3155 case 41 ... 43:
3156 return TSSI_EXTRA_GROUP(5);
3157 case 44 ... 48:
3158 return 6;
3159 case 49 ... 51:
3160 return TSSI_EXTRA_GROUP(6);
3161 case 52 ... 56:
3162 return 7;
3163 case 57 ... 59:
3164 return TSSI_EXTRA_GROUP(7);
3165 case 60 ... 64:
3166 return 8;
3167 case 100 ... 104:
3168 return 9;
3169 case 105 ... 107:
3170 return TSSI_EXTRA_GROUP(9);
3171 case 108 ... 112:
3172 return 10;
3173 case 113 ... 115:
3174 return TSSI_EXTRA_GROUP(10);
3175 case 116 ... 120:
3176 return 11;
3177 case 121 ... 123:
3178 return TSSI_EXTRA_GROUP(11);
3179 case 124 ... 128:
3180 return 12;
3181 case 129 ... 131:
3182 return TSSI_EXTRA_GROUP(12);
3183 case 132 ... 136:
3184 return 13;
3185 case 137 ... 139:
3186 return TSSI_EXTRA_GROUP(13);
3187 case 140 ... 144:
3188 return 14;
3189 case 149 ... 153:
3190 return 15;
3191 case 154 ... 156:
3192 return TSSI_EXTRA_GROUP(15);
3193 case 157 ... 161:
3194 return 16;
3195 case 162 ... 164:
3196 return TSSI_EXTRA_GROUP(16);
3197 case 165 ... 169:
3198 return 17;
3199 case 170 ... 172:
3200 return TSSI_EXTRA_GROUP(17);
3201 case 173 ... 177:
3202 return 18;
3203 }
3204
3205 return 0;
3206 }
3207
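/* Trim groups are coarser than the OFDM DE groups above: eight buckets
 * covering the 2 GHz band and the 5 GHz sub-bands, matching the layout of
 * the per-path tssi_trim[] table.
 */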
3208 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3209 {
3210 switch (ch) {
3211 case 1 ... 8:
3212 return 0;
3213 case 9 ... 14:
3214 return 1;
3215 case 36 ... 48:
3216 return 2;
3217 case 52 ... 64:
3218 return 3;
3219 case 100 ... 112:
3220 return 4;
3221 case 116 ... 128:
3222 return 5;
3223 case 132 ... 144:
3224 return 6;
3225 case 149 ... 177:
3226 return 7;
3227 }
3228
3229 return 0;
3230 }
3231
3232 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3233 enum rtw89_rf_path path)
3234 {
3235 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3236 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3237 u8 ch = chan->channel;
3238 u32 gidx, gidx_1st, gidx_2nd;
3239 s8 de_1st;
3240 s8 de_2nd;
3241 s8 val;
3242
3243 gidx = _tssi_get_ofdm_group(rtwdev, ch);
3244
3245 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3246 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
3247
3248 if (IS_TSSI_EXTRA_GROUP(gidx)) {
3249 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3250 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3251 de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3252 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3253 val = (de_1st + de_2nd) / 2;
3254
3255 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3256 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3257 path, val, de_1st, de_2nd);
3258 } else {
3259 val = tssi_info->tssi_mcs[path][gidx];
3260
3261 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3262 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3263 }
3264
3265 return val;
3266 }
3267
3268 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3269 enum rtw89_rf_path path)
3270 {
3271 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3272 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3273 u8 ch = chan->channel;
3274 u32 tgidx, tgidx_1st, tgidx_2nd;
3275 s8 tde_1st;
3276 s8 tde_2nd;
3277 s8 val;
3278
3279 tgidx = _tssi_get_trim_group(rtwdev, ch);
3280
3281 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3282 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3283 path, tgidx);
3284
3285 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3286 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3287 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3288 tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3289 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3290 val = (tde_1st + tde_2nd) / 2;
3291
3292 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3293 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3294 path, val, tde_1st, tde_2nd);
3295 } else {
3296 val = tssi_info->tssi_trim[path][tgidx];
3297
3298 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3299 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3300 path, val);
3301 }
3302
3303 return val;
3304 }
3305
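/* Program the DE (offset) fields, bits [21:12], of the CCK and MCS TSSI
 * registers on both paths with the eFuse-derived value plus trim.
 */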
3306 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3307 {
3308 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3309 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3310 u8 ch = chan->channel;
3311 u8 gidx;
3312 s8 ofdm_de;
3313 s8 trim_de;
3314 s32 val;
3315 u32 i;
3316
3317 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
3318 phy, ch);
3319
3320 for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
3321 gidx = _tssi_get_cck_group(rtwdev, ch);
3322 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3323 val = tssi_info->tssi_cck[i][gidx] + trim_de;
3324
3325 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3326 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
3327 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
3328
3329 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
3330 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
3331
3332 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3333 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
3334 _tssi_de_cck_long[i],
3335 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
3336 _TSSI_DE_MASK));
3337
3338 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
3339 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3340 val = ofdm_de + trim_de;
3341
3342 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3343 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
3344 i, ofdm_de, trim_de);
3345
3346 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
3347 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
3348 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
3349 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
3350 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
3351 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
3352
3353 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3354 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
3355 _tssi_de_mcs_20m[i],
3356 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
3357 _TSSI_DE_MASK));
3358 }
3359 }
3360
3361 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
3362 {
3363 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3364 "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
3365 "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
3366 R_TSSI_PA_K1 + (path << 13),
3367 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
3368 R_TSSI_PA_K2 + (path << 13),
3369 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
3370 R_P0_TSSI_ALIM1 + (path << 13),
3371 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
3372 R_P0_TSSI_ALIM3 + (path << 13),
3373 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
3374 R_TSSI_PA_K5 + (path << 13),
3375 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
3376 R_P0_TSSI_ALIM2 + (path << 13),
3377 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
3378 R_P0_TSSI_ALIM4 + (path << 13),
3379 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
3380 R_TSSI_PA_K8 + (path << 13),
3381 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
3382 }
3383
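/* Reload the cached alignment results for the band containing the current
 * channel; channels outside the known ranges fall back to the 2G entry.
 */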
3384 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
3385 enum rtw89_phy_idx phy, enum rtw89_rf_path path)
3386 {
3387 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3388 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3389 u8 channel = chan->channel;
3390 u8 band;
3391
3392 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3393 "======>%s phy=%d path=%d\n", __func__, phy, path);
3394
3395 if (channel >= 1 && channel <= 14)
3396 band = TSSI_ALIMK_2G;
3397 else if (channel >= 36 && channel <= 64)
3398 band = TSSI_ALIMK_5GL;
3399 else if (channel >= 100 && channel <= 144)
3400 band = TSSI_ALIMK_5GM;
3401 else if (channel >= 149 && channel <= 177)
3402 band = TSSI_ALIMK_5GH;
3403 else
3404 band = TSSI_ALIMK_2G;
3405
3406 if (tssi_info->alignment_done[path][band]) {
3407 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3408 tssi_info->alignment_value[path][band][0]);
3409 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3410 tssi_info->alignment_value[path][band][1]);
3411 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3412 tssi_info->alignment_value[path][band][2]);
3413 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3414 tssi_info->alignment_value[path][band][3]);
3415 }
3416
3417 _tssi_alimentk_dump_result(rtwdev, path);
3418 }
3419
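/* Start or stop the PMAC packet TX used as calibration stimulus; when
 * enabling, also set up PLCP, TX/RX paths and the requested power (dBm).
 */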
3420 static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3421 enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
3422 u8 enable)
3423 {
3424 enum rtw89_rf_path_bit rx_path;
3425
3426 if (path == RF_PATH_A)
3427 rx_path = RF_A;
3428 else if (path == RF_PATH_B)
3429 rx_path = RF_B;
3430 else if (path == RF_PATH_AB)
3431 rx_path = RF_AB;
3432 else
3433 rx_path = RF_ABCD; /* don't change path, but still set others */
3434
3435 if (enable) {
3436 rtw8852b_bb_set_plcp_tx(rtwdev);
3437 rtw8852b_bb_cfg_tx_path(rtwdev, path);
3438 rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
3439 rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
3440 }
3441
3442 rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
3443 }
3444
3445 static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
3446 enum rtw89_phy_idx phy, const u32 reg[],
3447 u32 reg_backup[], u32 reg_num)
3448 {
3449 u32 i;
3450
3451 for (i = 0; i < reg_num; i++) {
3452 reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
3453
3454 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3455 "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
3456 reg_backup[i]);
3457 }
3458 }
3459
3460 static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
3461 enum rtw89_phy_idx phy, const u32 reg[],
3462 u32 reg_backup[], u32 reg_num)
3464 {
3465 u32 i;
3466
3467 for (i = 0; i < reg_num; i++) {
3468 rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
3469
3470 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3471 "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
3472 reg_backup[i]);
3473 }
3474 }
3475
3476 static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
3477 {
3478 u8 channel_index;
3479
3480 if (channel >= 1 && channel <= 14)
3481 channel_index = channel - 1;
3482 else if (channel >= 36 && channel <= 64)
3483 channel_index = (channel - 36) / 2 + 14;
3484 else if (channel >= 100 && channel <= 144)
3485 channel_index = ((channel - 100) / 2) + 15 + 14;
3486 else if (channel >= 149 && channel <= 177)
3487 channel_index = ((channel - 149) / 2) + 38 + 14;
3488 else
3489 channel_index = 0;
3490
3491 return channel_index;
3492 }
3493
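/* Trigger a TSSI measurement for each entry of power[] and poll the
 * codeword-report ready bit (up to 100 retries, 30 us apart), logging HW
 * TX counters along the way; returns false if a report never turns ready.
 */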
3494 static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3495 enum rtw89_rf_path path, const s16 *power,
3496 u32 *tssi_cw_rpt)
3497 {
3498 u32 tx_counter, tx_counter_tmp;
3499 const int retry = 100;
3500 u32 tmp;
3501 int j, k;
3502
3503 for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
3504 rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
3505 rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);
3506
3507 tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3508
3509 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
3510 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3511 "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
3512 _tssi_trigger[path], tmp, path);
3513
3514 if (j == 0)
3515 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
3516 else
3517 _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
3518
3519 tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3520 tx_counter_tmp -= tx_counter;
3521
3522 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3523 "[TSSI PA K] First HWTXcounter=%d path=%d\n",
3524 tx_counter_tmp, path);
3525
3526 for (k = 0; k < retry; k++) {
3527 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
3528 B_TSSI_CWRPT_RDY);
3529 if (tmp)
3530 break;
3531
3532 udelay(30);
3533
3534 tx_counter_tmp =
3535 rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3536 tx_counter_tmp -= tx_counter;
3537
3538 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3539 "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
3540 k, tx_counter_tmp, path);
3541 }
3542
3543 if (k >= retry) {
3544 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3545 "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
3546 k, path);
3547
3548 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
3549 return false;
3550 }
3551
3552 tssi_cw_rpt[j] =
3553 rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
3554
3555 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
3556
3557 tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3558 tx_counter_tmp -= tx_counter;
3559
3560 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3561 "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
3562 tx_counter_tmp, path);
3563 }
3564
3565 return true;
3566 }
3567
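/* TSSI alignment calibration: reuse the per-channel backup if one exists;
 * otherwise transmit at two power levels, derive alignment offsets from
 * the codeword reports and default codewords, program the ALIM registers,
 * and cache the result per band and per channel.
 */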
3568 static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3569 enum rtw89_rf_path path)
3570 {
3571 static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
3572 0x78e4, 0x49c0, 0x0d18, 0x0d80};
3573 static const s16 power_2g[4] = {48, 20, 4, 4};
3574 static const s16 power_5g[4] = {48, 20, 4, 4};
3575 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3576 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3577 s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
3578 u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
3579 u8 channel = chan->channel;
3580 u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
3581 struct rtw8852b_bb_tssi_bak tssi_bak;
3582 s32 aliment_diff, tssi_cw_default;
3583 u32 start_time, finish_time;
3584 u32 bb_reg_backup[8] = {0};
3585 const s16 *power;
3586 u8 band;
3587 bool ok;
3588 u32 tmp;
3589 u8 j;
3590
3591 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3592 "======> %s channel=%d path=%d\n", __func__, channel,
3593 path);
3594
3595 if (tssi_info->check_backup_aligmk[path][ch_idx]) {
3596 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3597 tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
3598 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3599 tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
3600 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3601 tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
3602 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3603 tssi_info->alignment_backup_by_ch[path][ch_idx][3]);
3604
3605 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3606 "======> %s Reload TSSI Alignment !!!\n", __func__);
3607 _tssi_alimentk_dump_result(rtwdev, path);
3608 return;
3609 }
3610
3611 start_time = ktime_get_ns();
3612
3613 if (chan->band_type == RTW89_BAND_2G)
3614 power = power_2g;
3615 else
3616 power = power_5g;
3617
3618 if (channel >= 1 && channel <= 14)
3619 band = TSSI_ALIMK_2G;
3620 else if (channel >= 36 && channel <= 64)
3621 band = TSSI_ALIMK_5GL;
3622 else if (channel >= 100 && channel <= 144)
3623 band = TSSI_ALIMK_5GM;
3624 else if (channel >= 149 && channel <= 177)
3625 band = TSSI_ALIMK_5GH;
3626 else
3627 band = TSSI_ALIMK_2G;
3628
3629 rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
3630 _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
3631
3632 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
3633 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
3634 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
3635 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
3636
3637 ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
3638 if (!ok)
3639 goto out;
3640
3641 for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
3642 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3643 "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
3644 power[j], j, tssi_cw_rpt[j]);
3645 }
3646
3647 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
3648 _tssi_cw_default_mask[1]);
3649 tssi_cw_default = sign_extend32(tmp, 8);
3650 tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
3651 tssi_cw_rpt[1] + tssi_cw_default;
3652 aliment_diff = tssi_alim_offset_1 - tssi_cw_default;
3653
3654 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
3655 _tssi_cw_default_mask[2]);
3656 tssi_cw_default = sign_extend32(tmp, 8);
3657 tssi_alim_offset_2 = tssi_cw_default + aliment_diff;
3658
3659 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
3660 _tssi_cw_default_mask[3]);
3661 tssi_cw_default = sign_extend32(tmp, 8);
3662 tssi_alim_offset_3 = tssi_cw_default + aliment_diff;
3663
3664 if (path == RF_PATH_A) {
3665 tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
3666 FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
3667 FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
3668
3669 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
3670 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);
3671
3672 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3673 "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
3674 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
3675 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
3676 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
3677 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
3678 } else {
3679 tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
3680 FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
3681 FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
3682
3683 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
3684 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);
3685
3686 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3687 "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
3688 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
3689 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
3690 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
3691 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
3692 }
3693
3694 tssi_info->alignment_done[path][band] = true;
3695 tssi_info->alignment_value[path][band][0] =
3696 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
3697 tssi_info->alignment_value[path][band][1] =
3698 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
3699 tssi_info->alignment_value[path][band][2] =
3700 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
3701 tssi_info->alignment_value[path][band][3] =
3702 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
3703
3704 tssi_info->check_backup_aligmk[path][ch_idx] = true;
3705 tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
3706 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
3707 tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
3708 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
3709 tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
3710 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
3711 tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
3712 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
3713
3714 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3715 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
3716 path, band, R_P0_TSSI_ALIM1 + (path << 13),
3717 tssi_info->alignment_value[path][band][0]);
3718 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3719 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
3720 path, band, R_P0_TSSI_ALIM3 + (path << 13),
3721 tssi_info->alignment_value[path][band][1]);
3722 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3723 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
3724 path, band, R_P0_TSSI_ALIM2 + (path << 13),
3725 tssi_info->alignment_value[path][band][2]);
3726 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3727 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
3728 path, band, R_P0_TSSI_ALIM4 + (path << 13),
3729 tssi_info->alignment_value[path][band][3]);
3730
3731 out:
3732 _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
3733 rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
3734 rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);
3735
3736 finish_time = ktime_get_ns();
3737 tssi_info->tssi_alimk_time += finish_time - start_time;
3738
3739 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3740 "[TSSI PA K] %s processing time = %d ms\n", __func__,
3741 tssi_info->tssi_alimk_time);
3742 }
3743
3744 void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
3745 {
3746 _set_dpd_backoff(rtwdev, RTW89_PHY_0);
3747 }
3748
3749 void rtw8852b_rck(struct rtw89_dev *rtwdev)
3750 {
3751 u8 path;
3752
3753 for (path = 0; path < RF_PATH_NUM_8852B; path++)
3754 _rck(rtwdev, path);
3755 }
3756
3757 void rtw8852b_dack(struct rtw89_dev *rtwdev)
3758 {
3759 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
3760
3761 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3762 _dac_cal(rtwdev, false);
3763 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3764 }
3765
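/* The calibration entry points below share one pattern: notify BTC that a
 * WLAN RF calibration is starting, stop scheduled TX, wait for RX mode to
 * settle, run the calibration, then resume TX and notify BTC again.
 */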
3766 void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3767 {
3768 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3769 u32 tx_en;
3770
3771 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3772 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3773 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3774
3775 _iqk_init(rtwdev);
3776 _iqk(rtwdev, phy_idx, false);
3777
3778 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3779 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3780 }
3781
3782 void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3783 {
3784 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3785 u32 tx_en;
3786
3787 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3788 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3789 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3790
3791 _rx_dck(rtwdev, phy_idx);
3792
3793 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3794 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3795 }
3796
3797 void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3798 {
3799 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3800 u32 tx_en;
3801
3802 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3803 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3804 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3805
3806 rtwdev->dpk.is_dpk_enable = true;
3807 rtwdev->dpk.is_dpk_reload_en = false;
3808 _dpk(rtwdev, phy_idx, false);
3809
3810 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3811 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3812 }
3813
3814 void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
3815 {
3816 _dpk_track(rtwdev);
3817 }
3818
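/* Full TSSI bring-up: disable TSSI, run the per-path init sequence (RF
 * settings, power-control BB init, DCK, thermal table, DAC gain, slope),
 * optionally run alignment calibration with scheduled TX paused, then
 * re-enable TSSI and program the DE values from eFuse.
 */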
3819 void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
3820 {
3821 u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
3822 u32 tx_en;
3823 u8 i;
3824
3825 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
3826 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
3827
3828 _tssi_disable(rtwdev, phy);
3829
3830 for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
3831 _tssi_rf_setting(rtwdev, phy, i);
3832 _tssi_set_sys(rtwdev, phy, i);
3833 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
3834 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
3835 _tssi_set_dck(rtwdev, phy, i);
3836 _tssi_set_tmeter_tbl(rtwdev, phy, i);
3837 _tssi_set_dac_gain_tbl(rtwdev, phy, i);
3838 _tssi_slope_cal_org(rtwdev, phy, i);
3839 _tssi_alignment_default(rtwdev, phy, i, true);
3840 _tssi_set_tssi_slope(rtwdev, phy, i);
3841
3842 rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
3843 _tmac_tx_pause(rtwdev, phy, true);
3844 if (hwtx_en)
3845 _tssi_alimentk(rtwdev, phy, i);
3846 _tmac_tx_pause(rtwdev, phy, false);
3847 rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
3848 }
3849
3850 _tssi_enable(rtwdev, phy);
3851 _tssi_set_efuse_to_de(rtwdev, phy);
3852
3853 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
3854 }
3855
3856 void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3857 {
3858 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3859 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3860 u8 channel = chan->channel;
3861 u8 band;
3862 u32 i;
3863
3864 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3865 "======>%s phy=%d channel=%d\n", __func__, phy, channel);
3866
3867 if (channel >= 1 && channel <= 14)
3868 band = TSSI_ALIMK_2G;
3869 else if (channel >= 36 && channel <= 64)
3870 band = TSSI_ALIMK_5GL;
3871 else if (channel >= 100 && channel <= 144)
3872 band = TSSI_ALIMK_5GM;
3873 else if (channel >= 149 && channel <= 177)
3874 band = TSSI_ALIMK_5GH;
3875 else
3876 band = TSSI_ALIMK_2G;
3877
3878 _tssi_disable(rtwdev, phy);
3879
3880 for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
3881 _tssi_rf_setting(rtwdev, phy, i);
3882 _tssi_set_sys(rtwdev, phy, i);
3883 _tssi_set_tmeter_tbl(rtwdev, phy, i);
3884
3885 if (tssi_info->alignment_done[i][band])
3886 _tssi_alimentk_done(rtwdev, phy, i);
3887 else
3888 _tssi_alignment_default(rtwdev, phy, i, true);
3889 }
3890
3891 _tssi_enable(rtwdev, phy);
3892 _tssi_set_efuse_to_de(rtwdev, phy);
3893 }
3894
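/* Scan handling: at scan start, run the full TSSI flow once if TSSI mode
 * is not yet active; at scan end, reset the TX AGC offset to its default
 * (0xc0) and reapply the cached alignment results on both paths.
 */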
3895 static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
3896 enum rtw89_phy_idx phy, bool enable)
3897 {
3898 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3899 u8 channel = chan->channel;
3900
3901 rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
3902 __func__, channel);
3903
3904 if (enable) {
3905 if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3906 rtw8852b_tssi(rtwdev, phy, true);
3907 return;
3908 }
3909
3910 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3911 "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
3912 __func__,
3913 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
3914 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
3915
3916 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
3917 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
3918 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
3919 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
3920 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
3921 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
3922
3923 _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
3924 _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
3925
3926 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3927 "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
3928 __func__,
3929 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
3930 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
3931
3932 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3933 "======> %s SCAN_END\n", __func__);
3934 }
3935
3936 void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
3937 enum rtw89_phy_idx phy_idx)
3938 {
3939 if (scan_start)
3940 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
3941 else
3942 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
3943 }
3944
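/* Program the bandwidth field of RF register 0x18; @dav selects which of
 * the two per-path copies of the register is written, and both copies are
 * updated by _ctrl_bw() below.
 */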
3945 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3946 enum rtw89_bandwidth bw, bool dav)
3947 {
3948 u32 rf_reg18;
3949 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
3950
3951 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
3952
3953 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
3954 if (rf_reg18 == INV_RF_DATA) {
3955 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3956 "[RFK]Invalid RF_0x18 for Path-%d\n", path);
3957 return;
3958 }
3959 rf_reg18 &= ~RR_CFGCH_BW;
3960
3961 switch (bw) {
3962 case RTW89_CHANNEL_WIDTH_5:
3963 case RTW89_CHANNEL_WIDTH_10:
3964 case RTW89_CHANNEL_WIDTH_20:
3965 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
3966 break;
3967 case RTW89_CHANNEL_WIDTH_40:
3968 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
3969 break;
3970 case RTW89_CHANNEL_WIDTH_80:
3971 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
3972 break;
3973 default:
3974 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set BW\n");
3975 }
3976
3977 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
3978 RR_CFGCH_BW2) & RFREG_MASK;
3979 rf_reg18 |= RR_CFGCH_BW2;
3980 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
3981
3982 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
3983 bw, path, reg18_addr,
3984 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
3985 }
3986
3987 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3988 enum rtw89_bandwidth bw)
3989 {
3990 _bw_setting(rtwdev, RF_PATH_A, bw, true);
3991 _bw_setting(rtwdev, RF_PATH_B, bw, true);
3992 _bw_setting(rtwdev, RF_PATH_A, bw, false);
3993 _bw_setting(rtwdev, RF_PATH_B, bw, false);
3994 }
3995
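/* Write RF 0x18 on path A with the LDO select workaround applied and poll
 * the LPF busy bit for synthesizer lock; returns true on poll timeout.
 */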
3996 static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
3997 {
3998 u32 bak;
3999 u32 tmp;
4000 int ret;
4001
4002 bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
4003 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
4004 rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
4005
4006 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
4007 false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
4008 if (ret)
4009 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
4010
4011 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
4012
4013 return !!ret;
4014 }
4015
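/* Escalating synthesizer lock recovery: reset the SYN MMD first, then
 * rewrite RF 0x18, and finally power-cycle the SYN block, re-checking the
 * lock indicator (RR_SYNFB_LK) after each step.
 */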
4016 static void _lck_check(struct rtw89_dev *rtwdev)
4017 {
4018 u32 tmp;
4019
4020 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4021 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
4022
4023 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
4024 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
4025 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
4026 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
4027 }
4028
4029 udelay(10);
4030
4031 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4032 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
4033
4034 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
4035 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
4036 _set_s0_arfc18(rtwdev, tmp);
4037 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
4038 }
4039
4040 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4041 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
4042
4043 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
4044 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
4045 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
4046 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
4047
4048 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
4049 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
4050 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
4051 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
4052
4053 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
4054 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
4055 _set_s0_arfc18(rtwdev, tmp);
4056 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
4057
4058 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
4059 rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
4060 rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
4061 }
4062 }
4063
4064 static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
4065 {
4066 bool timeout;
4067
4068 timeout = _set_s0_arfc18(rtwdev, val);
4069 if (!timeout)
4070 _lck_check(rtwdev);
4071 }
4072
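/* Program the channel and band-select fields of RF 0x18. Path A's primary
 * (dav) copy goes through _set_ch() so a lock failure can be recovered;
 * LCKST is then toggled, presumably to restart the lock detector.
 */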
4073 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
4074 u8 central_ch, bool dav)
4075 {
4076 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
4077 bool is_2g_ch = central_ch <= 14;
4078 u32 rf_reg18;
4079
4080 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
4081
4082 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
4083 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
4084 RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
4085 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
4086
4087 if (!is_2g_ch)
4088 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
4089 FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
4090
4091 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
4092 RR_CFGCH_BW2) & RFREG_MASK;
4093 rf_reg18 |= RR_CFGCH_BW2;
4094
4095 if (path == RF_PATH_A && dav)
4096 _set_ch(rtwdev, rf_reg18);
4097 else
4098 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
4099
4100 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
4101 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
4102
4103 rtw89_debug(rtwdev, RTW89_DBG_RFK,
4104 "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
4105 central_ch, path, reg18_addr,
4106 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
4107 }
4108
4109 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
4110 {
4111 _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
4112 _ch_setting(rtwdev, RF_PATH_B, central_ch, true);
4113 _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
4114 _ch_setting(rtwdev, RF_PATH_B, central_ch, false);
4115 }
4116
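/* Select the RX baseband LPF bandwidth through the RF LUT: enable LUT
 * writes, select the table entry, write the per-bandwidth code, and
 * disable LUT writes again.
 */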
4117 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4118 enum rtw89_rf_path path)
4119 {
4120 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4121 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4122
4123 if (bw == RTW89_CHANNEL_WIDTH_20)
4124 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4125 else if (bw == RTW89_CHANNEL_WIDTH_40)
4126 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4127 else if (bw == RTW89_CHANNEL_WIDTH_80)
4128 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4129 else
4130 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4131
4132 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
4133 rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4134
4135 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4136 }
4137
4138 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4139 enum rtw89_bandwidth bw)
4140 {
4141 u8 kpath, path;
4142
4143 kpath = _kpath(rtwdev, phy);
4144
4145 for (path = 0; path < RF_PATH_NUM_8852B; path++) {
4146 if (!(kpath & BIT(path)))
4147 continue;
4148
4149 _set_rxbb_bw(rtwdev, bw, path);
4150 }
4151 }
4152
4153 static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
4154 enum rtw89_phy_idx phy, u8 central_ch,
4155 enum rtw89_band band, enum rtw89_bandwidth bw)
4156 {
4157 _ctrl_ch(rtwdev, central_ch);
4158 _ctrl_bw(rtwdev, phy, bw);
4159 _rxbb_bw(rtwdev, phy, bw);
4160 }
4161
4162 void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
4163 const struct rtw89_chan *chan,
4164 enum rtw89_phy_idx phy_idx)
4165 {
4166 rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
4167 chan->band_width);
4168 }
4169