1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include "cam.h"
6 #include "coex.h"
7 #include "debug.h"
8 #include "fw.h"
9 #include "mac.h"
10 #include "phy.h"
11 #include "reg.h"
12
rtw89_fw_h2c_alloc_skb(struct rtw89_dev * rtwdev,u32 len,bool header)13 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
14 bool header)
15 {
16 struct sk_buff *skb;
17 u32 header_len = 0;
18 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
19
20 if (header)
21 header_len = H2C_HEADER_LEN;
22
23 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
24 if (!skb)
25 return NULL;
26 skb_reserve(skb, header_len + h2c_desc_size);
27 memset(skb->data, 0, len);
28
29 return skb;
30 }
31
rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev * rtwdev,u32 len)32 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
33 {
34 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
35 }
36
rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev * rtwdev,u32 len)37 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
38 {
39 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
40 }
41
_fw_get_rdy(struct rtw89_dev * rtwdev)42 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
43 {
44 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
45
46 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
47 }
48
49 #define FWDL_WAIT_CNT 400000
rtw89_fw_check_rdy(struct rtw89_dev * rtwdev)50 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
51 {
52 u8 val;
53 int ret;
54
55 ret = read_poll_timeout_atomic(_fw_get_rdy, val,
56 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
57 1, FWDL_WAIT_CNT, false, rtwdev);
58 if (ret) {
59 switch (val) {
60 case RTW89_FWDL_CHECKSUM_FAIL:
61 rtw89_err(rtwdev, "fw checksum fail\n");
62 return -EINVAL;
63
64 case RTW89_FWDL_SECURITY_FAIL:
65 rtw89_err(rtwdev, "fw security fail\n");
66 return -EINVAL;
67
68 case RTW89_FWDL_CV_NOT_MATCH:
69 rtw89_err(rtwdev, "fw cv not match\n");
70 return -EINVAL;
71
72 default:
73 return -EBUSY;
74 }
75 }
76
77 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
78
79 return 0;
80 }
81
rtw89_fw_hdr_parser(struct rtw89_dev * rtwdev,const u8 * fw,u32 len,struct rtw89_fw_bin_info * info)82 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
83 struct rtw89_fw_bin_info *info)
84 {
85 struct rtw89_fw_hdr_section_info *section_info;
86 const u8 *fw_end = fw + len;
87 const u8 *bin;
88 u32 i;
89
90 if (!info)
91 return -EINVAL;
92
93 info->section_num = GET_FW_HDR_SEC_NUM(fw);
94 info->hdr_len = RTW89_FW_HDR_SIZE +
95 info->section_num * RTW89_FW_SECTION_HDR_SIZE;
96
97 bin = fw + info->hdr_len;
98
99 /* jump to section header */
100 fw += RTW89_FW_HDR_SIZE;
101 section_info = info->section_info;
102 for (i = 0; i < info->section_num; i++) {
103 section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
104 if (GET_FWSECTION_HDR_CHECKSUM(fw))
105 section_info->len += FWDL_SECTION_CHKSUM_LEN;
106 section_info->redl = GET_FWSECTION_HDR_REDL(fw);
107 section_info->dladdr =
108 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
109 section_info->addr = bin;
110 bin += section_info->len;
111 fw += RTW89_FW_SECTION_HDR_SIZE;
112 section_info++;
113 }
114
115 if (fw_end != bin) {
116 rtw89_err(rtwdev, "[ERR]fw bin size\n");
117 return -EINVAL;
118 }
119
120 return 0;
121 }
122
123 static
rtw89_mfw_recognize(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,struct rtw89_fw_suit * fw_suit)124 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
125 struct rtw89_fw_suit *fw_suit)
126 {
127 struct rtw89_fw_info *fw_info = &rtwdev->fw;
128 const u8 *mfw = fw_info->firmware->data;
129 u32 mfw_len = fw_info->firmware->size;
130 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
131 const struct rtw89_mfw_info *mfw_info;
132 int i;
133
134 if (mfw_hdr->sig != RTW89_MFW_SIG) {
135 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
136 /* legacy firmware support normal type only */
137 if (type != RTW89_FW_NORMAL)
138 return -EINVAL;
139 fw_suit->data = mfw;
140 fw_suit->size = mfw_len;
141 return 0;
142 }
143
144 for (i = 0; i < mfw_hdr->fw_nr; i++) {
145 mfw_info = &mfw_hdr->info[i];
146 if (mfw_info->cv != rtwdev->hal.cv ||
147 mfw_info->type != type ||
148 mfw_info->mp)
149 continue;
150
151 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
152 fw_suit->size = le32_to_cpu(mfw_info->size);
153 return 0;
154 }
155
156 rtw89_err(rtwdev, "no suitable firmware found\n");
157 return -ENOENT;
158 }
159
rtw89_fw_update_ver(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,struct rtw89_fw_suit * fw_suit)160 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
161 enum rtw89_fw_type type,
162 struct rtw89_fw_suit *fw_suit)
163 {
164 const u8 *hdr = fw_suit->data;
165
166 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
167 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
168 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
169 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
170 fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
171 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
172 fw_suit->build_date = GET_FW_HDR_DATE(hdr);
173 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
174 fw_suit->build_min = GET_FW_HDR_MIN(hdr);
175 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);
176
177 rtw89_info(rtwdev,
178 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
179 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
180 fw_suit->sub_idex, fw_suit->cmd_ver, type);
181 }
182
183 static
__rtw89_fw_recognize(struct rtw89_dev * rtwdev,enum rtw89_fw_type type)184 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
185 {
186 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
187 int ret;
188
189 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
190 if (ret)
191 return ret;
192
193 rtw89_fw_update_ver(rtwdev, type, fw_suit);
194
195 return 0;
196 }
197
198 #define __DEF_FW_FEAT_COND(__cond, __op) \
199 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
200 { \
201 return suit_ver_code __op comp_ver_code; \
202 }
203
204 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
205 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
206
207 struct __fw_feat_cfg {
208 enum rtw89_core_chip_id chip_id;
209 enum rtw89_fw_feature feature;
210 u32 ver_code;
211 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
212 };
213
214 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
215 { \
216 .chip_id = _chip, \
217 .feature = RTW89_FW_FEATURE_ ## _feat, \
218 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
219 .cond = __fw_feat_cond_ ## _cond, \
220 }
221
222 static const struct __fw_feat_cfg fw_feat_tbl[] = {
223 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
224 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
225 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
226 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
227 };
228
rtw89_fw_recognize_features(struct rtw89_dev * rtwdev)229 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
230 {
231 const struct rtw89_chip_info *chip = rtwdev->chip;
232 const struct __fw_feat_cfg *ent;
233 const struct rtw89_fw_suit *fw_suit;
234 u32 suit_ver_code;
235 int i;
236
237 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
238 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
239
240 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
241 ent = &fw_feat_tbl[i];
242 if (chip->chip_id != ent->chip_id)
243 continue;
244
245 if (ent->cond(suit_ver_code, ent->ver_code))
246 RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
247 }
248 }
249
rtw89_fw_recognize(struct rtw89_dev * rtwdev)250 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
251 {
252 int ret;
253
254 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
255 if (ret)
256 return ret;
257
258 /* It still works if wowlan firmware isn't existing. */
259 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);
260
261 rtw89_fw_recognize_features(rtwdev);
262
263 return 0;
264 }
265
rtw89_h2c_pkt_set_hdr(struct rtw89_dev * rtwdev,struct sk_buff * skb,u8 type,u8 cat,u8 class,u8 func,bool rack,bool dack,u32 len)266 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
267 u8 type, u8 cat, u8 class, u8 func,
268 bool rack, bool dack, u32 len)
269 {
270 struct fwcmd_hdr *hdr;
271
272 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
273
274 if (!(rtwdev->fw.h2c_seq % 4))
275 rack = true;
276 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
277 FIELD_PREP(H2C_HDR_CAT, cat) |
278 FIELD_PREP(H2C_HDR_CLASS, class) |
279 FIELD_PREP(H2C_HDR_FUNC, func) |
280 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
281
282 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
283 len + H2C_HEADER_LEN) |
284 (rack ? H2C_HDR_REC_ACK : 0) |
285 (dack ? H2C_HDR_DONE_ACK : 0));
286
287 rtwdev->fw.h2c_seq++;
288 }
289
rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev * rtwdev,struct sk_buff * skb,u8 type,u8 cat,u8 class,u8 func,u32 len)290 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
291 struct sk_buff *skb,
292 u8 type, u8 cat, u8 class, u8 func,
293 u32 len)
294 {
295 struct fwcmd_hdr *hdr;
296
297 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
298
299 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
300 FIELD_PREP(H2C_HDR_CAT, cat) |
301 FIELD_PREP(H2C_HDR_CLASS, class) |
302 FIELD_PREP(H2C_HDR_FUNC, func) |
303 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
304
305 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
306 len + H2C_HEADER_LEN));
307 }
308
__rtw89_fw_download_hdr(struct rtw89_dev * rtwdev,const u8 * fw,u32 len)309 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
310 {
311 struct sk_buff *skb;
312 u32 ret = 0;
313
314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
315 if (!skb) {
316 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
317 return -ENOMEM;
318 }
319
320 skb_put_data(skb, fw, len);
321 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
322 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
323 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
324 H2C_FUNC_MAC_FWHDR_DL, len);
325
326 ret = rtw89_h2c_tx(rtwdev, skb, false);
327 if (ret) {
328 rtw89_err(rtwdev, "failed to send h2c\n");
329 ret = -1;
330 goto fail;
331 }
332
333 return 0;
334 fail:
335 dev_kfree_skb_any(skb);
336
337 return ret;
338 }
339
rtw89_fw_download_hdr(struct rtw89_dev * rtwdev,const u8 * fw,u32 len)340 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
341 {
342 u8 val;
343 int ret;
344
345 ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
346 if (ret) {
347 rtw89_err(rtwdev, "[ERR]FW header download\n");
348 return ret;
349 }
350
351 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
352 1, FWDL_WAIT_CNT, false,
353 rtwdev, R_AX_WCPU_FW_CTRL);
354 if (ret) {
355 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
356 return ret;
357 }
358
359 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
360 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
361
362 return 0;
363 }
364
__rtw89_fw_download_main(struct rtw89_dev * rtwdev,struct rtw89_fw_hdr_section_info * info)365 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
366 struct rtw89_fw_hdr_section_info *info)
367 {
368 struct sk_buff *skb;
369 const u8 *section = info->addr;
370 u32 residue_len = info->len;
371 u32 pkt_len;
372 int ret;
373
374 while (residue_len) {
375 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
376 pkt_len = FWDL_SECTION_PER_PKT_LEN;
377 else
378 pkt_len = residue_len;
379
380 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
381 if (!skb) {
382 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
383 return -ENOMEM;
384 }
385 skb_put_data(skb, section, pkt_len);
386
387 ret = rtw89_h2c_tx(rtwdev, skb, true);
388 if (ret) {
389 rtw89_err(rtwdev, "failed to send h2c\n");
390 ret = -1;
391 goto fail;
392 }
393
394 section += pkt_len;
395 residue_len -= pkt_len;
396 }
397
398 return 0;
399 fail:
400 dev_kfree_skb_any(skb);
401
402 return ret;
403 }
404
rtw89_fw_download_main(struct rtw89_dev * rtwdev,const u8 * fw,struct rtw89_fw_bin_info * info)405 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
406 struct rtw89_fw_bin_info *info)
407 {
408 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
409 u8 section_num = info->section_num;
410 int ret;
411
412 while (section_num--) {
413 ret = __rtw89_fw_download_main(rtwdev, section_info);
414 if (ret)
415 return ret;
416 section_info++;
417 }
418
419 mdelay(5);
420
421 ret = rtw89_fw_check_rdy(rtwdev);
422 if (ret) {
423 rtw89_warn(rtwdev, "download firmware fail\n");
424 return ret;
425 }
426
427 return 0;
428 }
429
rtw89_fw_prog_cnt_dump(struct rtw89_dev * rtwdev)430 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
431 {
432 u32 val32;
433 u16 index;
434
435 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
436 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
437 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
438 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
439
440 for (index = 0; index < 15; index++) {
441 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
442 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
443 fsleep(10);
444 }
445 }
446
rtw89_fw_dl_fail_dump(struct rtw89_dev * rtwdev)447 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
448 {
449 u32 val32;
450 u16 val16;
451
452 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
453 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
454
455 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
456 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
457
458 rtw89_fw_prog_cnt_dump(rtwdev);
459 }
460
rtw89_fw_download(struct rtw89_dev * rtwdev,enum rtw89_fw_type type)461 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
462 {
463 struct rtw89_fw_info *fw_info = &rtwdev->fw;
464 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
465 struct rtw89_fw_bin_info info;
466 const u8 *fw = fw_suit->data;
467 u32 len = fw_suit->size;
468 u8 val;
469 int ret;
470
471 if (!fw || !len) {
472 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
473 return -ENOENT;
474 }
475
476 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
477 if (ret) {
478 rtw89_err(rtwdev, "parse fw header fail\n");
479 goto fwdl_err;
480 }
481
482 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
483 1, FWDL_WAIT_CNT, false,
484 rtwdev, R_AX_WCPU_FW_CTRL);
485 if (ret) {
486 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
487 goto fwdl_err;
488 }
489
490 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len);
491 if (ret) {
492 ret = -EBUSY;
493 goto fwdl_err;
494 }
495
496 ret = rtw89_fw_download_main(rtwdev, fw, &info);
497 if (ret) {
498 ret = -EBUSY;
499 goto fwdl_err;
500 }
501
502 fw_info->h2c_seq = 0;
503 fw_info->rec_seq = 0;
504 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
505 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
506
507 return ret;
508
509 fwdl_err:
510 rtw89_fw_dl_fail_dump(rtwdev);
511 return ret;
512 }
513
rtw89_wait_firmware_completion(struct rtw89_dev * rtwdev)514 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
515 {
516 struct rtw89_fw_info *fw = &rtwdev->fw;
517
518 wait_for_completion(&fw->completion);
519 if (!fw->firmware)
520 return -EINVAL;
521
522 return 0;
523 }
524
rtw89_load_firmware_cb(const struct firmware * firmware,void * context)525 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
526 {
527 struct rtw89_fw_info *fw = context;
528 struct rtw89_dev *rtwdev = fw->rtwdev;
529
530 if (!firmware || !firmware->data) {
531 rtw89_err(rtwdev, "failed to request firmware\n");
532 complete_all(&fw->completion);
533 return;
534 }
535
536 fw->firmware = firmware;
537 complete_all(&fw->completion);
538 }
539
rtw89_load_firmware(struct rtw89_dev * rtwdev)540 int rtw89_load_firmware(struct rtw89_dev *rtwdev)
541 {
542 struct rtw89_fw_info *fw = &rtwdev->fw;
543 const char *fw_name = rtwdev->chip->fw_name;
544 int ret;
545
546 fw->rtwdev = rtwdev;
547 init_completion(&fw->completion);
548
549 ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
550 GFP_KERNEL, fw, rtw89_load_firmware_cb);
551 if (ret) {
552 rtw89_err(rtwdev, "failed to async firmware request\n");
553 return ret;
554 }
555
556 return 0;
557 }
558
rtw89_unload_firmware(struct rtw89_dev * rtwdev)559 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
560 {
561 struct rtw89_fw_info *fw = &rtwdev->fw;
562
563 rtw89_wait_firmware_completion(rtwdev);
564
565 if (fw->firmware)
566 release_firmware(fw->firmware);
567 }
568
569 #define H2C_CAM_LEN 60
rtw89_fw_h2c_cam(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct rtw89_sta * rtwsta,const u8 * scan_mac_addr)570 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
571 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
572 {
573 struct sk_buff *skb;
574
575 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
576 if (!skb) {
577 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
578 return -ENOMEM;
579 }
580 skb_put(skb, H2C_CAM_LEN);
581 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
582 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);
583
584 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
585 H2C_CAT_MAC,
586 H2C_CL_MAC_ADDR_CAM_UPDATE,
587 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
588 H2C_CAM_LEN);
589
590 if (rtw89_h2c_tx(rtwdev, skb, false)) {
591 rtw89_err(rtwdev, "failed to send h2c\n");
592 goto fail;
593 }
594
595 return 0;
596 fail:
597 dev_kfree_skb_any(skb);
598
599 return -EBUSY;
600 }
601
602 #define H2C_DCTL_SEC_CAM_LEN 68
rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct rtw89_sta * rtwsta)603 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
604 struct rtw89_vif *rtwvif,
605 struct rtw89_sta *rtwsta)
606 {
607 struct sk_buff *skb;
608
609 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
610 if (!skb) {
611 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
612 return -ENOMEM;
613 }
614 skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
615
616 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
617
618 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
619 H2C_CAT_MAC,
620 H2C_CL_MAC_FR_EXCHG,
621 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
622 H2C_DCTL_SEC_CAM_LEN);
623
624 if (rtw89_h2c_tx(rtwdev, skb, false)) {
625 rtw89_err(rtwdev, "failed to send h2c\n");
626 goto fail;
627 }
628
629 return 0;
630 fail:
631 dev_kfree_skb_any(skb);
632
633 return -EBUSY;
634 }
635 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
636
637 #define H2C_BA_CAM_LEN 8
rtw89_fw_h2c_ba_cam(struct rtw89_dev * rtwdev,struct rtw89_sta * rtwsta,bool valid,struct ieee80211_ampdu_params * params)638 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
639 bool valid, struct ieee80211_ampdu_params *params)
640 {
641 u8 macid = rtwsta->mac_id;
642 struct sk_buff *skb;
643 u8 entry_idx;
644 int ret;
645
646 ret = valid ?
647 rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
648 rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
649 if (ret) {
650 /* it still works even if we don't have static BA CAM, because
651 * hardware can create dynamic BA CAM automatically.
652 */
653 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
654 "failed to %s entry tid=%d for h2c ba cam\n",
655 valid ? "alloc" : "free", params->tid);
656 return 0;
657 }
658
659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
660 if (!skb) {
661 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
662 return -ENOMEM;
663 }
664 skb_put(skb, H2C_BA_CAM_LEN);
665 SET_BA_CAM_MACID(skb->data, macid);
666 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
667 if (!valid)
668 goto end;
669 SET_BA_CAM_VALID(skb->data, valid);
670 SET_BA_CAM_TID(skb->data, params->tid);
671 if (params->buf_size > 64)
672 SET_BA_CAM_BMAP_SIZE(skb->data, 4);
673 else
674 SET_BA_CAM_BMAP_SIZE(skb->data, 0);
675 /* If init req is set, hw will set the ssn */
676 SET_BA_CAM_INIT_REQ(skb->data, 1);
677 SET_BA_CAM_SSN(skb->data, params->ssn);
678
679 end:
680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
681 H2C_CAT_MAC,
682 H2C_CL_BA_CAM,
683 H2C_FUNC_MAC_BA_CAM, 0, 1,
684 H2C_BA_CAM_LEN);
685
686 if (rtw89_h2c_tx(rtwdev, skb, false)) {
687 rtw89_err(rtwdev, "failed to send h2c\n");
688 goto fail;
689 }
690
691 return 0;
692 fail:
693 dev_kfree_skb_any(skb);
694
695 return -EBUSY;
696 }
697
698 #define H2C_LOG_CFG_LEN 12
rtw89_fw_h2c_fw_log(struct rtw89_dev * rtwdev,bool enable)699 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
700 {
701 struct sk_buff *skb;
702 u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
703 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
704
705 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
706 if (!skb) {
707 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
708 return -ENOMEM;
709 }
710
711 skb_put(skb, H2C_LOG_CFG_LEN);
712 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
713 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
714 SET_LOG_CFG_COMP(skb->data, comp);
715 SET_LOG_CFG_COMP_EXT(skb->data, 0);
716
717 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
718 H2C_CAT_MAC,
719 H2C_CL_FW_INFO,
720 H2C_FUNC_LOG_CFG, 0, 0,
721 H2C_LOG_CFG_LEN);
722
723 if (rtw89_h2c_tx(rtwdev, skb, false)) {
724 rtw89_err(rtwdev, "failed to send h2c\n");
725 goto fail;
726 }
727
728 return 0;
729 fail:
730 dev_kfree_skb_any(skb);
731
732 return -EBUSY;
733 }
734
735 #define H2C_GENERAL_PKT_LEN 6
736 #define H2C_GENERAL_PKT_ID_UND 0xff
rtw89_fw_h2c_general_pkt(struct rtw89_dev * rtwdev,u8 macid)737 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
738 {
739 struct sk_buff *skb;
740
741 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
742 if (!skb) {
743 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
744 return -ENOMEM;
745 }
746 skb_put(skb, H2C_GENERAL_PKT_LEN);
747 SET_GENERAL_PKT_MACID(skb->data, macid);
748 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
749 SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
750 SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
751 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
752 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
753
754 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
755 H2C_CAT_MAC,
756 H2C_CL_FW_INFO,
757 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
758 H2C_GENERAL_PKT_LEN);
759
760 if (rtw89_h2c_tx(rtwdev, skb, false)) {
761 rtw89_err(rtwdev, "failed to send h2c\n");
762 goto fail;
763 }
764
765 return 0;
766 fail:
767 dev_kfree_skb_any(skb);
768
769 return -EBUSY;
770 }
771
772 #define H2C_LPS_PARM_LEN 8
rtw89_fw_h2c_lps_parm(struct rtw89_dev * rtwdev,struct rtw89_lps_parm * lps_param)773 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
774 struct rtw89_lps_parm *lps_param)
775 {
776 struct sk_buff *skb;
777
778 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
779 if (!skb) {
780 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
781 return -ENOMEM;
782 }
783 skb_put(skb, H2C_LPS_PARM_LEN);
784
785 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
786 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
787 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
788 SET_LPS_PARM_RLBM(skb->data, 1);
789 SET_LPS_PARM_SMARTPS(skb->data, 1);
790 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
791 SET_LPS_PARM_VOUAPSD(skb->data, 0);
792 SET_LPS_PARM_VIUAPSD(skb->data, 0);
793 SET_LPS_PARM_BEUAPSD(skb->data, 0);
794 SET_LPS_PARM_BKUAPSD(skb->data, 0);
795
796 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
797 H2C_CAT_MAC,
798 H2C_CL_MAC_PS,
799 H2C_FUNC_MAC_LPS_PARM, 0, 1,
800 H2C_LPS_PARM_LEN);
801
802 if (rtw89_h2c_tx(rtwdev, skb, false)) {
803 rtw89_err(rtwdev, "failed to send h2c\n");
804 goto fail;
805 }
806
807 return 0;
808 fail:
809 dev_kfree_skb_any(skb);
810
811 return -EBUSY;
812 }
813
814 #define H2C_CMC_TBL_LEN 68
rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif)815 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
816 struct rtw89_vif *rtwvif)
817 {
818 const struct rtw89_chip_info *chip = rtwdev->chip;
819 struct rtw89_hal *hal = &rtwdev->hal;
820 struct sk_buff *skb;
821 u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
822 u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
823 u8 macid = rtwvif->mac_id;
824
825 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
826 if (!skb) {
827 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
828 return -ENOMEM;
829 }
830 skb_put(skb, H2C_CMC_TBL_LEN);
831 SET_CTRL_INFO_MACID(skb->data, macid);
832 SET_CTRL_INFO_OPERATION(skb->data, 1);
833 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
834 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
835 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
836 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
837 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
838 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
839 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
840 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
841 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
842 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
843 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
844 }
845 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
846 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
847 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
848 SET_CMC_TBL_DATA_DCM(skb->data, 0);
849
850 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
851 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
852 chip->h2c_cctl_func_id, 0, 1,
853 H2C_CMC_TBL_LEN);
854
855 if (rtw89_h2c_tx(rtwdev, skb, false)) {
856 rtw89_err(rtwdev, "failed to send h2c\n");
857 goto fail;
858 }
859
860 return 0;
861 fail:
862 dev_kfree_skb_any(skb);
863
864 return -EBUSY;
865 }
866
__get_sta_he_pkt_padding(struct rtw89_dev * rtwdev,struct ieee80211_sta * sta,u8 * pads)867 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
868 struct ieee80211_sta *sta, u8 *pads)
869 {
870 bool ppe_th;
871 u8 ppe16, ppe8;
872 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
873 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
874 u8 ru_bitmap;
875 u8 n, idx, sh;
876 u16 ppe;
877 int i;
878
879 if (!sta->deflink.he_cap.has_he)
880 return;
881
882 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
883 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
884 if (!ppe_th) {
885 u8 pad;
886
887 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
888 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
889
890 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
891 pads[i] = pad;
892
893 return;
894 }
895
896 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
897 n = hweight8(ru_bitmap);
898 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
899
900 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
901 if (!(ru_bitmap & BIT(i))) {
902 pads[i] = 1;
903 continue;
904 }
905
906 idx = n >> 3;
907 sh = n & 7;
908 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
909
910 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
911 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
912 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
913 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
914
915 if (ppe16 != 7 && ppe8 == 7)
916 pads[i] = 2;
917 else if (ppe8 != 7)
918 pads[i] = 1;
919 else
920 pads[i] = 0;
921 }
922 }
923
rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev * rtwdev,struct ieee80211_vif * vif,struct ieee80211_sta * sta)924 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
925 struct ieee80211_vif *vif,
926 struct ieee80211_sta *sta)
927 {
928 const struct rtw89_chip_info *chip = rtwdev->chip;
929 struct rtw89_hal *hal = &rtwdev->hal;
930 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
931 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
932 struct sk_buff *skb;
933 u8 pads[RTW89_PPE_BW_NUM];
934 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
935
936 memset(pads, 0, sizeof(pads));
937 if (sta)
938 __get_sta_he_pkt_padding(rtwdev, sta, pads);
939
940 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
941 if (!skb) {
942 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
943 return -ENOMEM;
944 }
945 skb_put(skb, H2C_CMC_TBL_LEN);
946 SET_CTRL_INFO_MACID(skb->data, mac_id);
947 SET_CTRL_INFO_OPERATION(skb->data, 1);
948 SET_CMC_TBL_DISRTSFB(skb->data, 1);
949 SET_CMC_TBL_DISDATAFB(skb->data, 1);
950 if (hal->current_band_type == RTW89_BAND_2G)
951 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
952 else
953 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
954 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
955 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
956 if (vif->type == NL80211_IFTYPE_STATION)
957 SET_CMC_TBL_ULDL(skb->data, 1);
958 else
959 SET_CMC_TBL_ULDL(skb->data, 0);
960 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
961 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
962 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
963 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
964 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
965 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
966 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
967 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
968 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
969 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
970 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
971 }
972 if (sta)
973 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
974 sta->deflink.he_cap.has_he);
975 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
976 SET_CMC_TBL_DATA_DCM(skb->data, 0);
977
978 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
979 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
980 chip->h2c_cctl_func_id, 0, 1,
981 H2C_CMC_TBL_LEN);
982
983 if (rtw89_h2c_tx(rtwdev, skb, false)) {
984 rtw89_err(rtwdev, "failed to send h2c\n");
985 goto fail;
986 }
987
988 return 0;
989 fail:
990 dev_kfree_skb_any(skb);
991
992 return -EBUSY;
993 }
994
rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev * rtwdev,struct rtw89_sta * rtwsta)995 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
996 struct rtw89_sta *rtwsta)
997 {
998 const struct rtw89_chip_info *chip = rtwdev->chip;
999 struct sk_buff *skb;
1000
1001 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1002 if (!skb) {
1003 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1004 return -ENOMEM;
1005 }
1006 skb_put(skb, H2C_CMC_TBL_LEN);
1007 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1008 SET_CTRL_INFO_OPERATION(skb->data, 1);
1009 if (rtwsta->cctl_tx_time) {
1010 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
1011 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
1012 }
1013 if (rtwsta->cctl_tx_retry_limit) {
1014 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
1015 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
1016 }
1017
1018 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1019 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1020 chip->h2c_cctl_func_id, 0, 1,
1021 H2C_CMC_TBL_LEN);
1022
1023 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1024 rtw89_err(rtwdev, "failed to send h2c\n");
1025 goto fail;
1026 }
1027
1028 return 0;
1029 fail:
1030 dev_kfree_skb_any(skb);
1031
1032 return -EBUSY;
1033 }
1034
1035 #define H2C_BCN_BASE_LEN 12
rtw89_fw_h2c_update_beacon(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif)1036 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
1037 struct rtw89_vif *rtwvif)
1038 {
1039 struct rtw89_hal *hal = &rtwdev->hal;
1040 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1041 struct sk_buff *skb;
1042 struct sk_buff *skb_beacon;
1043 u16 tim_offset;
1044 int bcn_total_len;
1045
1046 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL);
1047 if (!skb_beacon) {
1048 rtw89_err(rtwdev, "failed to get beacon skb\n");
1049 return -ENOMEM;
1050 }
1051
1052 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
1053 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
1054 if (!skb) {
1055 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1056 dev_kfree_skb_any(skb_beacon);
1057 return -ENOMEM;
1058 }
1059 skb_put(skb, H2C_BCN_BASE_LEN);
1060
1061 SET_BCN_UPD_PORT(skb->data, rtwvif->port);
1062 SET_BCN_UPD_MBSSID(skb->data, 0);
1063 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
1064 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
1065 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
1066 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
1067 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
1068 SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
1069 RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
1070
1071 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
1072 dev_kfree_skb_any(skb_beacon);
1073
1074 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1075 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1076 H2C_FUNC_MAC_BCN_UPD, 0, 1,
1077 bcn_total_len);
1078
1079 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1080 rtw89_err(rtwdev, "failed to send h2c\n");
1081 dev_kfree_skb_any(skb);
1082 return -EBUSY;
1083 }
1084
1085 return 0;
1086 }
1087
1088 #define H2C_ROLE_MAINTAIN_LEN 4
rtw89_fw_h2c_role_maintain(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct rtw89_sta * rtwsta,enum rtw89_upd_mode upd_mode)1089 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
1090 struct rtw89_vif *rtwvif,
1091 struct rtw89_sta *rtwsta,
1092 enum rtw89_upd_mode upd_mode)
1093 {
1094 struct sk_buff *skb;
1095 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1096 u8 self_role;
1097
1098 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
1099 if (rtwsta)
1100 self_role = RTW89_SELF_ROLE_AP_CLIENT;
1101 else
1102 self_role = rtwvif->self_role;
1103 } else {
1104 self_role = rtwvif->self_role;
1105 }
1106
1107 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
1108 if (!skb) {
1109 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1110 return -ENOMEM;
1111 }
1112 skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
1113 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
1114 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
1115 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
1116 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1117
1118 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1119 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1120 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
1121 H2C_ROLE_MAINTAIN_LEN);
1122
1123 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1124 rtw89_err(rtwdev, "failed to send h2c\n");
1125 goto fail;
1126 }
1127
1128 return 0;
1129 fail:
1130 dev_kfree_skb_any(skb);
1131
1132 return -EBUSY;
1133 }
1134
1135 #define H2C_JOIN_INFO_LEN 4
rtw89_fw_h2c_join_info(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct rtw89_sta * rtwsta,bool dis_conn)1136 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1137 struct rtw89_sta *rtwsta, bool dis_conn)
1138 {
1139 struct sk_buff *skb;
1140 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1141 u8 self_role = rtwvif->self_role;
1142 u8 net_type = rtwvif->net_type;
1143
1144 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
1145 self_role = RTW89_SELF_ROLE_AP_CLIENT;
1146 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
1147 }
1148
1149 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1150 if (!skb) {
1151 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1152 return -ENOMEM;
1153 }
1154 skb_put(skb, H2C_JOIN_INFO_LEN);
1155 SET_JOININFO_MACID(skb->data, mac_id);
1156 SET_JOININFO_OP(skb->data, dis_conn);
1157 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
1158 SET_JOININFO_WMM(skb->data, rtwvif->wmm);
1159 SET_JOININFO_TGR(skb->data, rtwvif->trigger);
1160 SET_JOININFO_ISHESTA(skb->data, 0);
1161 SET_JOININFO_DLBW(skb->data, 0);
1162 SET_JOININFO_TF_MAC_PAD(skb->data, 0);
1163 SET_JOININFO_DL_T_PE(skb->data, 0);
1164 SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
1165 SET_JOININFO_NET_TYPE(skb->data, net_type);
1166 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1167 SET_JOININFO_SELF_ROLE(skb->data, self_role);
1168
1169 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1170 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1171 H2C_FUNC_MAC_JOININFO, 0, 1,
1172 H2C_JOIN_INFO_LEN);
1173
1174 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1175 rtw89_err(rtwdev, "failed to send h2c\n");
1176 goto fail;
1177 }
1178
1179 return 0;
1180 fail:
1181 dev_kfree_skb_any(skb);
1182
1183 return -EBUSY;
1184 }
1185
rtw89_fw_h2c_macid_pause(struct rtw89_dev * rtwdev,u8 sh,u8 grp,bool pause)1186 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
1187 bool pause)
1188 {
1189 struct rtw89_fw_macid_pause_grp h2c = {{0}};
1190 u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
1191 struct sk_buff *skb;
1192
1193 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1194 if (!skb) {
1195 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1196 return -ENOMEM;
1197 }
1198 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
1199 if (pause)
1200 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
1201 skb_put_data(skb, &h2c, len);
1202
1203 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1204 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1205 H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
1206 len);
1207
1208 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1209 rtw89_err(rtwdev, "failed to send h2c\n");
1210 goto fail;
1211 }
1212
1213 return 0;
1214 fail:
1215 dev_kfree_skb_any(skb);
1216
1217 return -EBUSY;
1218 }
1219
1220 #define H2C_EDCA_LEN 12
rtw89_fw_h2c_set_edca(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,u8 ac,u32 val)1221 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1222 u8 ac, u32 val)
1223 {
1224 struct sk_buff *skb;
1225
1226 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
1227 if (!skb) {
1228 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
1229 return -ENOMEM;
1230 }
1231 skb_put(skb, H2C_EDCA_LEN);
1232 RTW89_SET_EDCA_SEL(skb->data, 0);
1233 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
1234 RTW89_SET_EDCA_WMM(skb->data, 0);
1235 RTW89_SET_EDCA_AC(skb->data, ac);
1236 RTW89_SET_EDCA_PARAM(skb->data, val);
1237
1238 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1239 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1240 H2C_FUNC_USR_EDCA, 0, 1,
1241 H2C_EDCA_LEN);
1242
1243 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1244 rtw89_err(rtwdev, "failed to send h2c\n");
1245 goto fail;
1246 }
1247
1248 return 0;
1249 fail:
1250 dev_kfree_skb_any(skb);
1251
1252 return -EBUSY;
1253 }
1254
1255 #define H2C_OFLD_CFG_LEN 8
rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev * rtwdev)1256 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
1257 {
1258 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
1259 struct sk_buff *skb;
1260
1261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
1262 if (!skb) {
1263 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
1264 return -ENOMEM;
1265 }
1266 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
1267
1268 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1269 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1270 H2C_FUNC_OFLD_CFG, 0, 1,
1271 H2C_OFLD_CFG_LEN);
1272
1273 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1274 rtw89_err(rtwdev, "failed to send h2c\n");
1275 goto fail;
1276 }
1277
1278 return 0;
1279 fail:
1280 dev_kfree_skb_any(skb);
1281
1282 return -EBUSY;
1283 }
1284
1285 #define H2C_RA_LEN 16
rtw89_fw_h2c_ra(struct rtw89_dev * rtwdev,struct rtw89_ra_info * ra,bool csi)1286 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
1287 {
1288 struct sk_buff *skb;
1289 u8 *cmd;
1290
1291 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
1292 if (!skb) {
1293 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1294 return -ENOMEM;
1295 }
1296 skb_put(skb, H2C_RA_LEN);
1297 cmd = skb->data;
1298 rtw89_debug(rtwdev, RTW89_DBG_RA,
1299 "ra cmd msk: %llx ", ra->ra_mask);
1300
1301 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
1302 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
1303 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
1304 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
1305 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
1306 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
1307 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
1308 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
1309 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
1310 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
1311 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
1312 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
1313 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
1314 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
1315 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
1316 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
1317 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
1318 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
1319 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
1320
1321 if (csi) {
1322 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
1323 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
1324 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
1325 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
1326 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
1327 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
1328 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
1329 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
1330 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
1331 }
1332
1333 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1334 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
1335 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
1336 H2C_RA_LEN);
1337
1338 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1339 rtw89_err(rtwdev, "failed to send h2c\n");
1340 goto fail;
1341 }
1342
1343 return 0;
1344 fail:
1345 dev_kfree_skb_any(skb);
1346
1347 return -EBUSY;
1348 }
1349
1350 #define H2C_LEN_CXDRVHDR 2
1351 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
rtw89_fw_h2c_cxdrv_init(struct rtw89_dev * rtwdev)1352 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
1353 {
1354 struct rtw89_btc *btc = &rtwdev->btc;
1355 struct rtw89_btc_dm *dm = &btc->dm;
1356 struct rtw89_btc_init_info *init_info = &dm->init_info;
1357 struct rtw89_btc_module *module = &init_info->module;
1358 struct rtw89_btc_ant_info *ant = &module->ant;
1359 struct sk_buff *skb;
1360 u8 *cmd;
1361
1362 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
1363 if (!skb) {
1364 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
1365 return -ENOMEM;
1366 }
1367 skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
1368 cmd = skb->data;
1369
1370 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
1371 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);
1372
1373 RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
1374 RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
1375 RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
1376 RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
1377 RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);
1378
1379 RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
1380 RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
1381 RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
1382 RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
1383 RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);
1384
1385 RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
1386 RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
1387 RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
1388 RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
1389 RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
1390 RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);
1391
1392 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1393 H2C_CAT_OUTSRC, BTFC_SET,
1394 SET_DRV_INFO, 0, 0,
1395 H2C_LEN_CXDRVINFO_INIT);
1396
1397 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1398 rtw89_err(rtwdev, "failed to send h2c\n");
1399 goto fail;
1400 }
1401
1402 return 0;
1403 fail:
1404 dev_kfree_skb_any(skb);
1405
1406 return -EBUSY;
1407 }
1408
1409 #define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
rtw89_fw_h2c_cxdrv_role(struct rtw89_dev * rtwdev)1410 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
1411 {
1412 struct rtw89_btc *btc = &rtwdev->btc;
1413 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1414 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
1415 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
1416 struct rtw89_btc_wl_active_role *active = role_info->active_role;
1417 struct sk_buff *skb;
1418 u8 *cmd;
1419 int i;
1420
1421 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
1422 if (!skb) {
1423 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
1424 return -ENOMEM;
1425 }
1426 skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
1427 cmd = skb->data;
1428
1429 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
1430 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);
1431
1432 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
1433 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
1434
1435 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
1436 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
1437 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
1438 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
1439 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
1440 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
1441 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
1442 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
1443 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
1444 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
1445 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
1446 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
1447
1448 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
1449 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
1450 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
1451 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
1452 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
1453 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
1454 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
1455 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
1456 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
1457 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
1458 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
1459 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
1460 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
1461 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
1462 }
1463
1464 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1465 H2C_CAT_OUTSRC, BTFC_SET,
1466 SET_DRV_INFO, 0, 0,
1467 H2C_LEN_CXDRVINFO_ROLE);
1468
1469 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1470 rtw89_err(rtwdev, "failed to send h2c\n");
1471 goto fail;
1472 }
1473
1474 return 0;
1475 fail:
1476 dev_kfree_skb_any(skb);
1477
1478 return -EBUSY;
1479 }
1480
1481 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev * rtwdev)1482 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
1483 {
1484 struct rtw89_btc *btc = &rtwdev->btc;
1485 struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
1486 struct sk_buff *skb;
1487 u8 *cmd;
1488
1489 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
1490 if (!skb) {
1491 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
1492 return -ENOMEM;
1493 }
1494 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
1495 cmd = skb->data;
1496
1497 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
1498 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
1499
1500 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
1501 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
1502 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
1503 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
1504
1505 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1506 H2C_CAT_OUTSRC, BTFC_SET,
1507 SET_DRV_INFO, 0, 0,
1508 H2C_LEN_CXDRVINFO_CTRL);
1509
1510 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1511 rtw89_err(rtwdev, "failed to send h2c\n");
1512 goto fail;
1513 }
1514
1515 return 0;
1516 fail:
1517 dev_kfree_skb_any(skb);
1518
1519 return -EBUSY;
1520 }
1521
1522 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev * rtwdev)1523 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
1524 {
1525 struct rtw89_btc *btc = &rtwdev->btc;
1526 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1527 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
1528 struct sk_buff *skb;
1529 u8 *cmd;
1530
1531 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
1532 if (!skb) {
1533 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
1534 return -ENOMEM;
1535 }
1536 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
1537 cmd = skb->data;
1538
1539 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
1540 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
1541
1542 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
1543 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
1544 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
1545 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
1546 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
1547
1548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1549 H2C_CAT_OUTSRC, BTFC_SET,
1550 SET_DRV_INFO, 0, 0,
1551 H2C_LEN_CXDRVINFO_RFK);
1552
1553 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1554 rtw89_err(rtwdev, "failed to send h2c\n");
1555 goto fail;
1556 }
1557
1558 return 0;
1559 fail:
1560 dev_kfree_skb_any(skb);
1561
1562 return -EBUSY;
1563 }
1564
1565 #define H2C_LEN_PKT_OFLD 4
rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev * rtwdev,u8 id)1566 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
1567 {
1568 struct sk_buff *skb;
1569 u8 *cmd;
1570
1571 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
1572 if (!skb) {
1573 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
1574 return -ENOMEM;
1575 }
1576 skb_put(skb, H2C_LEN_PKT_OFLD);
1577 cmd = skb->data;
1578
1579 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
1580 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
1581
1582 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1583 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1584 H2C_FUNC_PACKET_OFLD, 1, 1,
1585 H2C_LEN_PKT_OFLD);
1586
1587 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1588 rtw89_err(rtwdev, "failed to send h2c\n");
1589 goto fail;
1590 }
1591
1592 return 0;
1593 fail:
1594 dev_kfree_skb_any(skb);
1595
1596 return -EBUSY;
1597 }
1598
rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev * rtwdev,u8 * id,struct sk_buff * skb_ofld)1599 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
1600 struct sk_buff *skb_ofld)
1601 {
1602 struct sk_buff *skb;
1603 u8 *cmd;
1604 u8 alloc_id;
1605
1606 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
1607 RTW89_MAX_PKT_OFLD_NUM);
1608 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
1609 return -ENOSPC;
1610
1611 *id = alloc_id;
1612
1613 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
1614 if (!skb) {
1615 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
1616 return -ENOMEM;
1617 }
1618 skb_put(skb, H2C_LEN_PKT_OFLD);
1619 cmd = skb->data;
1620
1621 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
1622 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
1623 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
1624 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
1625
1626 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1627 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1628 H2C_FUNC_PACKET_OFLD, 1, 1,
1629 H2C_LEN_PKT_OFLD + skb_ofld->len);
1630
1631 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1632 rtw89_err(rtwdev, "failed to send h2c\n");
1633 goto fail;
1634 }
1635
1636 return 0;
1637 fail:
1638 dev_kfree_skb_any(skb);
1639
1640 return -EBUSY;
1641 }
1642
1643 #define H2C_LEN_SCAN_LIST_OFFLOAD 4
rtw89_fw_h2c_scan_list_offload(struct rtw89_dev * rtwdev,int len,struct list_head * chan_list)1644 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
1645 struct list_head *chan_list)
1646 {
1647 struct rtw89_mac_chinfo *ch_info;
1648 struct sk_buff *skb;
1649 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
1650 u8 *cmd;
1651
1652 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
1653 if (!skb) {
1654 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
1655 return -ENOMEM;
1656 }
1657 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
1658 cmd = skb->data;
1659
1660 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
1661 /* in unit of 4 bytes */
1662 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
1663
1664 list_for_each_entry(ch_info, chan_list, list) {
1665 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
1666
1667 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
1668 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
1669 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
1670 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
1671 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
1672 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
1673 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
1674 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
1675 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
1676 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
1677 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
1678 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
1679 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
1680 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
1681 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
1682 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
1683 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
1684 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
1685 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
1686 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
1687 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
1688 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
1689 }
1690
1691 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1692 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1693 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
1694
1695 if (rtw89_h2c_tx(rtwdev, skb, false)) {
1696 rtw89_err(rtwdev, "failed to send h2c\n");
1697 goto fail;
1698 }
1699
1700 return 0;
1701 fail:
1702 dev_kfree_skb_any(skb);
1703
1704 return -EBUSY;
1705 }
1706
#define H2C_LEN_SCAN_OFFLOAD 20
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
                              struct rtw89_scan_option *option,
                              struct rtw89_vif *rtwvif)
{
        struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
        struct sk_buff *skb;
        u8 *cmd;

        skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
        if (!skb) {
                rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
                return -ENOMEM;
        }
        skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
        cmd = skb->data;

        RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
        RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
        RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
        RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
        RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
        RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
        RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
        RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
        if (option->target_ch_mode) {
                RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
                RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
                                                       scan_info->op_pri_ch);
                RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
                                                           scan_info->op_chan);
        }

        rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
                              H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
                              H2C_FUNC_SCANOFLD, 1, 1,
                              H2C_LEN_SCAN_OFFLOAD);

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;
fail:
        dev_kfree_skb_any(skb);

        return -EBUSY;
}

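/* Download one page of the pre-built RF register configuration to the
 * firmware; the OUTSRC H2C class is selected according to the RF path.
 */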
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
                        struct rtw89_fw_h2c_rf_reg_info *info,
                        u16 len, u8 page)
{
        struct sk_buff *skb;
        u8 class = info->rf_path == RF_PATH_A ?
                   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;

        skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
        if (!skb) {
                rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
                return -ENOMEM;
        }
        skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

        rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
                              H2C_CAT_OUTSRC, class, page, 0, 0,
                              len);

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;
fail:
        dev_kfree_skb_any(skb);

        return -EBUSY;
}

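/* Tell the RF firmware which two channels/bands are used for MCC
 * (multi-channel concurrency), together with the current channel and band.
 */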
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
        struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
        struct rtw89_fw_h2c_rf_get_mccch *mccch;
        struct sk_buff *skb;

        skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
        if (!skb) {
                rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
                return -ENOMEM;
        }
        skb_put(skb, sizeof(*mccch));
        mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;

        mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
        mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
        mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
        mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
        mccch->current_channel = cpu_to_le32(rtwdev->hal.current_channel);
        mccch->current_band_type = cpu_to_le32(rtwdev->hal.current_band_type);

        rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
                              H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
                              H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
                              sizeof(*mccch));

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;
fail:
        dev_kfree_skb_any(skb);

        return -EBUSY;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);

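/* Send an arbitrary OUTSRC-category H2C payload with the standard header;
 * the @rack/@dack flags are carried in the header fields.
 */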
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
                              u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
                              bool rack, bool dack)
{
        struct sk_buff *skb;

        skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
        if (!skb) {
                rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
                return -ENOMEM;
        }
        skb_put_data(skb, buf, len);

        rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
                              H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
                              len);

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;
fail:
        dev_kfree_skb_any(skb);

        return -EBUSY;
}

int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
        struct sk_buff *skb;

        skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
        if (!skb) {
                rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
                return -ENOMEM;
        }
        skb_put_data(skb, buf, len);

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;
fail:
        dev_kfree_skb_any(skb);

        return -EBUSY;
}

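/* Replay every raw H2C queued on early_h2c_list.  Entries remain on the
 * list and are released separately by rtw89_fw_free_all_early_h2c().
 * Caller must hold rtwdev->mutex.
 */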
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
        struct rtw89_early_h2c *early_h2c;

        lockdep_assert_held(&rtwdev->mutex);

        list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
                rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
        }
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
        struct rtw89_early_h2c *early_h2c, *tmp;

        mutex_lock(&rtwdev->mutex);
        list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
                list_del(&early_h2c->list);
                kfree(early_h2c->h2c);
                kfree(early_h2c);
        }
        mutex_unlock(&rtwdev->mutex);
}

void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
        skb_queue_tail(&rtwdev->c2h_queue, c2h);
        ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
}

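/* Dispatch one C2H event to the MAC, BTC or PHY handler according to its
 * category and class.  Every C2H except firmware log messages is also
 * hex-dumped under the RTW89_DBG_FW debug mask.
 */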
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
                                    struct sk_buff *skb)
{
        u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
        u8 class = RTW89_GET_C2H_CLASS(skb->data);
        u8 func = RTW89_GET_C2H_FUNC(skb->data);
        u16 len = RTW89_GET_C2H_LEN(skb->data);
        bool dump = true;

        if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
                return;

        switch (category) {
        case RTW89_C2H_CAT_TEST:
                break;
        case RTW89_C2H_CAT_MAC:
                rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
                if (class == RTW89_MAC_C2H_CLASS_INFO &&
                    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
                        dump = false;
                break;
        case RTW89_C2H_CAT_OUTSRC:
                if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
                    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
                        rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
                else
                        rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
                break;
        }

        if (dump)
                rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

void rtw89_fw_c2h_work(struct work_struct *work)
{
        struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
                                                c2h_work);
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
                skb_unlink(skb, &rtwdev->c2h_queue);
                mutex_lock(&rtwdev->mutex);
                rtw89_fw_c2h_cmd_handle(rtwdev, skb);
                mutex_unlock(&rtwdev->mutex);
                dev_kfree_skb_any(skb);
        }
}

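/* Pass a short H2C message through the H2C registers instead of a TX skb:
 * wait for the firmware to release the control register, write the words
 * and set the trigger bit.
 */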
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
                                  struct rtw89_mac_h2c_info *info)
{
        const struct rtw89_chip_info *chip = rtwdev->chip;
        const u32 *h2c_reg = chip->h2c_regs;
        u8 i, val, len;
        int ret;

        ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
                                rtwdev, chip->h2c_ctrl_reg);
        if (ret) {
                rtw89_warn(rtwdev, "FW does not process h2c registers\n");
                return ret;
        }

        len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
                           sizeof(info->h2creg[0]));

        RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
        RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
        for (i = 0; i < RTW89_H2CREG_MAX; i++)
                rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);

        rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

        return 0;
}

static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
                                 struct rtw89_mac_c2h_info *info)
{
        const struct rtw89_chip_info *chip = rtwdev->chip;
        const u32 *c2h_reg = chip->c2h_regs;
        u32 ret;
        u8 i, val;

        info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

        ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
                                       RTW89_C2H_TIMEOUT, false, rtwdev,
                                       chip->c2h_ctrl_reg);
        if (ret) {
                rtw89_warn(rtwdev, "c2h reg timeout\n");
                return ret;
        }

        for (i = 0; i < RTW89_C2HREG_MAX; i++)
                info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

        rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

        info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
        info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
                            RTW89_C2HREG_HDR_LEN;

        return 0;
}

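/* Register-based H2C/C2H exchange: write @h2c_info (if given) through the
 * H2C registers and/or read the reply back into @c2h_info.
 */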
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
                     struct rtw89_mac_h2c_info *h2c_info,
                     struct rtw89_mac_c2h_info *c2h_info)
{
        u32 ret;

        if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
                lockdep_assert_held(&rtwdev->mutex);

        if (!h2c_info && !c2h_info)
                return -EINVAL;

        if (!h2c_info)
                goto recv_c2h;

        ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
        if (ret)
                return ret;

recv_c2h:
        if (!c2h_info)
                return 0;

        ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
        if (ret)
                return ret;

        return 0;
}

void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
        if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
                rtw89_err(rtwdev, "[ERR]pwr is off\n");
                return;
        }

        rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
        rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
        rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
        rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
        rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
                   rtw89_read32(rtwdev, R_AX_HALT_C2H));
        rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
                   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

        rtw89_fw_prog_cnt_dump(rtwdev);
}

static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
{
        struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
        struct rtw89_pktofld_info *info, *tmp;
        u8 idx;

        for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
                if (!(rtwdev->chip->support_bands & BIT(idx)))
                        continue;

                list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
                        rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
                        rtw89_core_release_bit_map(rtwdev->pkt_offload,
                                                   info->id);
                        list_del(&info->list);
                        kfree(info);
                }
        }
}

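/* For every supported band, duplicate the bare probe request in @skb,
 * append the band-specific and common scan IEs, and register the result
 * with the firmware packet offload engine; the allocated packet ids are
 * tracked on scan_info->pkt_list.
 */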
static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
                                     struct rtw89_vif *rtwvif,
                                     struct sk_buff *skb)
{
        struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
        struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
        struct rtw89_pktofld_info *info;
        struct sk_buff *new;
        int ret = 0;
        u8 band;

        for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
                if (!(rtwdev->chip->support_bands & BIT(band)))
                        continue;

                new = skb_copy(skb, GFP_KERNEL);
                if (!new) {
                        ret = -ENOMEM;
                        goto out;
                }
                skb_put_data(new, ies->ies[band], ies->len[band]);
                skb_put_data(new, ies->common_ies, ies->common_ie_len);

                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info) {
                        ret = -ENOMEM;
                        kfree_skb(new);
                        goto out;
                }

                list_add_tail(&info->list, &scan_info->pkt_list[band]);
                ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
                if (ret) {
                        kfree_skb(new);
                        goto out;
                }

                kfree_skb(new);
        }
out:
        return ret;
}

static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
                                          struct rtw89_vif *rtwvif)
{
        struct cfg80211_scan_request *req = rtwvif->scan_req;
        struct sk_buff *skb;
        u8 num = req->n_ssids, i;
        int ret;

        for (i = 0; i < num; i++) {
                skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
                                             req->ssids[i].ssid,
                                             req->ssids[i].ssid_len,
                                             req->ie_len);
                if (!skb)
                        return -ENOMEM;

                ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
                kfree_skb(skb);

                if (ret)
                        return ret;
        }

        return 0;
}

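/* Fill one channel entry of the firmware scan channel list.  DFS/no-IR
 * channels get a period of at least RTW89_DFS_CHAN_TIME plus a dwell time,
 * while RTW89_CHAN_OPERATE entries let the firmware go back to the
 * operating channel and send a NULL frame instead of probe requests.
 */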
static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
                                   int ssid_num,
                                   struct rtw89_mac_chinfo *ch_info)
{
        struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
        struct rtw89_pktofld_info *info;
        u8 band, probe_count = 0;

        ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
        ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
        ch_info->bw = RTW89_SCAN_WIDTH;
        ch_info->tx_pkt = true;
        ch_info->cfg_tx_pwr = false;
        ch_info->tx_pwr_idx = 0;
        ch_info->tx_null = false;
        ch_info->pause_data = false;

        if (ssid_num) {
                ch_info->num_pkt = ssid_num;
                band = ch_info->ch_band;

                list_for_each_entry(info, &scan_info->pkt_list[band], list) {
                        ch_info->probe_id = info->id;
                        ch_info->pkt_id[probe_count] = info->id;
                        if (++probe_count >= ssid_num)
                                break;
                }
                if (probe_count != ssid_num)
                        rtw89_err(rtwdev, "SSID num differs from list len\n");
        }

        switch (chan_type) {
        case RTW89_CHAN_OPERATE:
                ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
                ch_info->central_ch = scan_info->op_chan;
                ch_info->pri_ch = scan_info->op_pri_ch;
                ch_info->ch_band = scan_info->op_band;
                ch_info->bw = scan_info->op_bw;
                ch_info->tx_null = true;
                ch_info->num_pkt = 0;
                break;
        case RTW89_CHAN_DFS:
                ch_info->period = max_t(u8, ch_info->period,
                                        RTW89_DFS_CHAN_TIME);
                ch_info->dwell_time = RTW89_DWELL_TIME;
                break;
        case RTW89_CHAN_ACTIVE:
                break;
        default:
                rtw89_err(rtwdev, "Channel type out of bound\n");
        }
}

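/* Translate the cfg80211 channel list into firmware chinfo entries.  An
 * extra RTW89_CHAN_OPERATE entry is inserted whenever the accumulated
 * off-channel time would exceed RTW89_OFF_CHAN_TIME, then the whole list
 * is downloaded via rtw89_fw_h2c_scan_list_offload().
 */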
static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
                                       struct rtw89_vif *rtwvif)
{
        struct cfg80211_scan_request *req = rtwvif->scan_req;
        struct rtw89_mac_chinfo *ch_info, *tmp;
        struct ieee80211_channel *channel;
        struct list_head chan_list;
        bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
        int list_len = req->n_channels, off_chan_time = 0;
        enum rtw89_chan_type type;
        int ret = 0, i;

        INIT_LIST_HEAD(&chan_list);
        for (i = 0; i < req->n_channels; i++) {
                channel = req->channels[i];
                ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
                if (!ch_info) {
                        ret = -ENOMEM;
                        goto out;
                }

                ch_info->period = req->duration_mandatory ?
                                  req->duration : RTW89_CHANNEL_TIME;
                ch_info->ch_band = channel->band;
                ch_info->central_ch = channel->hw_value;
                ch_info->pri_ch = channel->hw_value;
                ch_info->rand_seq_num = random_seq;

                if (channel->flags &
                    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
                        type = RTW89_CHAN_DFS;
                else
                        type = RTW89_CHAN_ACTIVE;
                rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);

                if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
                    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
                        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
                        if (!tmp) {
                                ret = -ENOMEM;
                                kfree(ch_info);
                                goto out;
                        }

                        type = RTW89_CHAN_OPERATE;
                        tmp->period = req->duration_mandatory ?
                                      req->duration : RTW89_CHANNEL_TIME;
                        rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
                        list_add_tail(&tmp->list, &chan_list);
                        off_chan_time = 0;
                        list_len++;
                }
                list_add_tail(&ch_info->list, &chan_list);
                off_chan_time += ch_info->period;
        }
        rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);

out:
        list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
                list_del(&ch_info->list);
                kfree(ch_info);
        }

        return ret;
}

static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
                                   struct rtw89_vif *rtwvif)
{
        int ret;

        ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
        if (ret) {
                rtw89_err(rtwdev, "Update probe request failed\n");
                goto out;
        }
        ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
out:
        return ret;
}

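/* Prepare software state for a firmware-offloaded scan: stop the TX queues,
 * pick the (optionally randomized) scan MAC address and relax the RX filter
 * so off-channel frames are not dropped while scanning.
 */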
void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
                         struct ieee80211_scan_request *scan_req)
{
        struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
        struct cfg80211_scan_request *req = &scan_req->req;
        u8 mac_addr[ETH_ALEN];

        rtwdev->scan_info.scanning_vif = vif;
        rtwvif->scan_ies = &scan_req->ies;
        rtwvif->scan_req = req;
        ieee80211_stop_queues(rtwdev->hw);

        if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
                get_random_mask_addr(mac_addr, req->mac_addr,
                                     req->mac_addr_mask);
        else
                ether_addr_copy(mac_addr, vif->addr);
        rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);

        rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
        rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
        rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
        rtw89_write32_mask(rtwdev,
                           rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
                           B_AX_RX_FLTR_CFG_MASK,
                           rtwdev->hal.rx_fltr);
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
                            bool aborted)
{
        struct cfg80211_scan_info info = {
                .aborted = aborted,
        };
        struct rtw89_vif *rtwvif;

        if (!vif)
                return;

        rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
        rtwdev->hal.rx_fltr |= B_AX_A_BC;
        rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
        rtw89_write32_mask(rtwdev,
                           rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
                           B_AX_RX_FLTR_CFG_MASK,
                           rtwdev->hal.rx_fltr);

        rtw89_core_scan_complete(rtwdev, vif, true);
        ieee80211_scan_completed(rtwdev->hw, &info);
        ieee80211_wake_queues(rtwdev->hw);

        rtw89_release_pkt_list(rtwdev);
        rtwvif = (struct rtw89_vif *)vif->drv_priv;
        rtwvif->scan_req = NULL;
        rtwvif->scan_ies = NULL;
        rtwdev->scan_info.scanning_vif = NULL;
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
        rtw89_hw_scan_offload(rtwdev, vif, false);
        rtw89_hw_scan_complete(rtwdev, vif, true);
}

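/* Enable or disable the firmware scan for @vif: on enable, refresh the
 * probe request templates and download the channel list before sending
 * the scan offload H2C.
 */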
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
                          bool enable)
{
        struct rtw89_scan_option opt = {0};
        struct rtw89_vif *rtwvif;
        int ret = 0;

        rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
        if (!rtwvif)
                return -EINVAL;

        opt.enable = enable;
        opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
        if (enable) {
                ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
                if (ret)
                        goto out;
        }
        rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
out:
        return ret;
}

void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
{
        struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
        struct rtw89_hal *hal = &rtwdev->hal;

        scan_info->op_pri_ch = hal->current_primary_channel;
        scan_info->op_chan = hal->current_channel;
        scan_info->op_bw = hal->current_band_width;
        scan_info->op_band = hal->current_band_type;
}

#define H2C_FW_CPU_EXCEPTION_LEN 4
#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
        struct sk_buff *skb;

        skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
        if (!skb) {
                rtw89_err(rtwdev,
                          "failed to alloc skb for fw cpu exception\n");
                return -ENOMEM;
        }

        skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
        RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
                                           H2C_FW_CPU_EXCEPTION_TYPE_DEF);

        rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
                              H2C_CAT_TEST,
                              H2C_CL_FW_STATUS_TEST,
                              H2C_FUNC_CPU_EXCEPTION, 0, 0,
                              H2C_FW_CPU_EXCEPTION_LEN);

        if (rtw89_h2c_tx(rtwdev, skb, false)) {
                rtw89_err(rtwdev, "failed to send h2c\n");
                goto fail;
        }

        return 0;

fail:
        dev_kfree_skb_any(skb);
        return -EBUSY;
}
