1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/completion.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/firmware.h>
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12
13 #include "iwl-drv.h"
14 #include "iwl-csr.h"
15 #include "iwl-debug.h"
16 #include "iwl-trans.h"
17 #include "iwl-op-mode.h"
18 #include "iwl-agn-hw.h"
19 #include "fw/img.h"
20 #include "iwl-dbg-tlv.h"
21 #include "iwl-config.h"
22 #include "iwl-modparams.h"
23 #include "fw/api/alive.h"
24 #include "fw/api/mac.h"
25
26 /******************************************************************************
27 *
28 * module boilerplate
29 *
30 ******************************************************************************/
31
32 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
33 MODULE_DESCRIPTION(DRV_DESCRIPTION);
34 MODULE_LICENSE("GPL");
35
36 #ifdef CONFIG_IWLWIFI_DEBUGFS
37 static struct dentry *iwl_dbgfs_root;
38 #endif
39
40 /**
41 * struct iwl_drv - drv common data
42 * @list: list of drv structures using this opmode
43 * @fw: the iwl_fw structure
44 * @op_mode: the running op_mode
45 * @trans: transport layer
46 * @dev: for debug prints only
47 * @fw_index: firmware revision to try loading
48 * @firmware_name: composite filename of ucode file to load
49 * @request_firmware_complete: the firmware has been obtained from user space
50 * @dbgfs_drv: debugfs root directory entry
51 * @dbgfs_trans: debugfs transport directory entry
52 * @dbgfs_op_mode: debugfs op_mode directory entry
53 */
54 struct iwl_drv {
55 struct list_head list;
56 struct iwl_fw fw;
57
58 struct iwl_op_mode *op_mode;
59 struct iwl_trans *trans;
60 struct device *dev;
61
62 int fw_index; /* firmware we're trying to load */
63 char firmware_name[64]; /* name of firmware file to load */
64
65 struct completion request_firmware_complete;
66
67 #ifdef CONFIG_IWLWIFI_DEBUGFS
68 struct dentry *dbgfs_drv;
69 struct dentry *dbgfs_trans;
70 struct dentry *dbgfs_op_mode;
71 #endif
72 };
73
74 enum {
75 DVM_OP_MODE,
76 MVM_OP_MODE,
77 };
78
79 /* Protects the table contents, i.e. the ops pointer & drv list */
80 static DEFINE_MUTEX(iwlwifi_opmode_table_mtx);
81 static struct iwlwifi_opmode_table {
82 const char *name; /* name: iwldvm, iwlmvm, etc */
83 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
84 struct list_head drv; /* list of devices using this op_mode */
85 } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
86 [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
87 [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
88 };
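/*
 * Illustration of the flow around this table: if the firmware file selects
 * an op_mode (iwldvm or iwlmvm) whose module is not loaded yet, the drv is
 * queued on that entry's drv list and request_module() is issued; once the
 * op_mode module calls iwl_opmode_register(), every queued drv is started
 * via _iwl_op_mode_start() (see iwl_req_fw_callback() below).
 */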
89
90 #define IWL_DEFAULT_SCAN_CHANNELS 40
91
92 /*
93 * struct fw_sec: Just for the image parsing process.
94 * For the fw storage we are using struct fw_desc.
95 */
96 struct fw_sec {
97 const void *data; /* the sec data */
98 size_t size; /* section size */
99 u32 offset; /* offset of writing in the device */
100 };
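/*
 * Note: struct fw_sec only points into the blob returned by
 * request_firmware(); iwl_alloc_fw_desc() later copies each section into a
 * vmalloc'd buffer referenced by struct fw_desc, so the raw blob can be
 * released once parsing is done.
 */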
101
102 static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
103 {
104 vfree(desc->data);
105 desc->data = NULL;
106 desc->len = 0;
107 }
108
109 static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
110 {
111 int i;
112 for (i = 0; i < img->num_sec; i++)
113 iwl_free_fw_desc(drv, &img->sec[i]);
114 kfree(img->sec);
115 }
116
117 static void iwl_dealloc_ucode(struct iwl_drv *drv)
118 {
119 int i;
120
121 kfree(drv->fw.dbg.dest_tlv);
122 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
123 kfree(drv->fw.dbg.conf_tlv[i]);
124 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
125 kfree(drv->fw.dbg.trigger_tlv[i]);
126 kfree(drv->fw.dbg.mem_tlv);
127 kfree(drv->fw.iml);
128 kfree(drv->fw.ucode_capa.cmd_versions);
129 kfree(drv->fw.phy_integration_ver);
130
131 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
132 iwl_free_fw_img(drv, drv->fw.img + i);
133
134 /* clear the data for the aborted load case */
135 memset(&drv->fw, 0, sizeof(drv->fw));
136 }
137
138 static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
139 struct fw_sec *sec)
140 {
141 void *data;
142
143 desc->data = NULL;
144
145 if (!sec || !sec->size)
146 return -EINVAL;
147
148 data = vmalloc(sec->size);
149 if (!data)
150 return -ENOMEM;
151
152 desc->len = sec->size;
153 desc->offset = sec->offset;
154 memcpy(data, sec->data, desc->len);
155 desc->data = data;
156
157 return 0;
158 }
159
160 static void iwl_req_fw_callback(const struct firmware *ucode_raw,
161 void *context);
162
163 static int iwl_request_firmware(struct iwl_drv *drv, bool first)
164 {
165 const struct iwl_cfg *cfg = drv->trans->cfg;
166 char tag[8];
167
168 if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
169 (drv->trans->hw_rev_step != SILICON_B_STEP &&
170 drv->trans->hw_rev_step != SILICON_C_STEP)) {
171 IWL_ERR(drv,
172 "Only HW steps B and C are currently supported (0x%0x)\n",
173 drv->trans->hw_rev);
174 return -EINVAL;
175 }
176
177 if (first) {
178 drv->fw_index = cfg->ucode_api_max;
179 sprintf(tag, "%d", drv->fw_index);
180 } else {
181 drv->fw_index--;
182 sprintf(tag, "%d", drv->fw_index);
183 }
184
185 if (drv->fw_index < cfg->ucode_api_min) {
186 IWL_ERR(drv, "no suitable firmware found!\n");
187
188 if (cfg->ucode_api_min == cfg->ucode_api_max) {
189 IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
190 cfg->ucode_api_max);
191 } else {
192 IWL_ERR(drv, "minimum version required: %s%d\n",
193 cfg->fw_name_pre, cfg->ucode_api_min);
194 IWL_ERR(drv, "maximum version supported: %s%d\n",
195 cfg->fw_name_pre, cfg->ucode_api_max);
196 }
197
198 IWL_ERR(drv,
199 "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
200 return -ENOENT;
201 }
202
203 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
204 cfg->fw_name_pre, tag);
205
206 IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
207 drv->firmware_name);
208
209 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
210 drv->trans->dev,
211 GFP_KERNEL, drv, iwl_req_fw_callback);
212 }
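/*
 * Example (with a hypothetical fw_name_pre of "iwlwifi-XXXX-"): the first
 * call above requests "iwlwifi-XXXX-<ucode_api_max>.ucode"; each retry via
 * iwl_req_fw_callback() decrements fw_index, so successively older API
 * versions are tried until one is found or ucode_api_min is crossed.
 */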
213
214 struct fw_img_parsing {
215 struct fw_sec *sec;
216 int sec_counter;
217 };
218
219 /*
220 * struct fw_sec_parsing: to extract a fw section and its offset from the TLV
221 */
222 struct fw_sec_parsing {
223 __le32 offset;
224 const u8 data[];
225 } __packed;
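/*
 * A section-carrying TLV payload therefore looks like this:
 *
 *   +---------------+------------------------------------+
 *   | __le32 offset | section data (tlv_len - 4 bytes)   |
 *   +---------------+------------------------------------+
 *
 * iwl_store_ucode_sec() below splits it accordingly.
 */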
226
227 /**
228 * struct iwl_tlv_calib_data - parse the default calib data from TLV
229 *
230 * @ucode_type: the uCode to which the following default calib relates.
231 * @calib: default calibrations.
232 */
233 struct iwl_tlv_calib_data {
234 __le32 ucode_type;
235 struct iwl_tlv_calib_ctrl calib;
236 } __packed;
237
238 struct iwl_firmware_pieces {
239 struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];
240
241 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
242 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
243
244 /* FW debug data parsed for driver usage */
245 bool dbg_dest_tlv_init;
246 const u8 *dbg_dest_ver;
247 union {
248 const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
249 const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
250 };
251 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
252 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
253 const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
254 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
255 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
256 size_t n_mem_tlv;
257 };
258
259 /*
260 * These functions are just to extract uCode section data from the pieces
261 * structure.
262 */
263 static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
264 enum iwl_ucode_type type,
265 int sec)
266 {
267 return &pieces->img[type].sec[sec];
268 }
269
270 static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
271 enum iwl_ucode_type type,
272 int sec)
273 {
274 struct fw_img_parsing *img = &pieces->img[type];
275 struct fw_sec *sec_memory;
276 int size = sec + 1;
277 size_t alloc_size = sizeof(*img->sec) * size;
278
279 if (img->sec && img->sec_counter >= size)
280 return;
281
282 sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
283 if (!sec_memory)
284 return;
285
286 img->sec = sec_memory;
287 img->sec_counter = size;
288 }
289
290 static void set_sec_data(struct iwl_firmware_pieces *pieces,
291 enum iwl_ucode_type type,
292 int sec,
293 const void *data)
294 {
295 alloc_sec_data(pieces, type, sec);
296
297 pieces->img[type].sec[sec].data = data;
298 }
299
300 static void set_sec_size(struct iwl_firmware_pieces *pieces,
301 enum iwl_ucode_type type,
302 int sec,
303 size_t size)
304 {
305 alloc_sec_data(pieces, type, sec);
306
307 pieces->img[type].sec[sec].size = size;
308 }
309
310 static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
311 enum iwl_ucode_type type,
312 int sec)
313 {
314 return pieces->img[type].sec[sec].size;
315 }
316
317 static void set_sec_offset(struct iwl_firmware_pieces *pieces,
318 enum iwl_ucode_type type,
319 int sec,
320 u32 offset)
321 {
322 alloc_sec_data(pieces, type, sec);
323
324 pieces->img[type].sec[sec].offset = offset;
325 }
326
327 /*
328 * Gets uCode section from tlv.
329 */
330 static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
331 const void *data, enum iwl_ucode_type type,
332 int size)
333 {
334 struct fw_img_parsing *img;
335 struct fw_sec *sec;
336 const struct fw_sec_parsing *sec_parse;
337 size_t alloc_size;
338
339 if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
340 return -1;
341
342 sec_parse = (const struct fw_sec_parsing *)data;
343
344 img = &pieces->img[type];
345
346 alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
347 sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
348 if (!sec)
349 return -ENOMEM;
350 img->sec = sec;
351
352 sec = &img->sec[img->sec_counter];
353
354 sec->offset = le32_to_cpu(sec_parse->offset);
355 sec->data = sec_parse->data;
356 sec->size = size - sizeof(sec_parse->offset);
357
358 ++img->sec_counter;
359
360 return 0;
361 }
362
363 static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
364 {
365 const struct iwl_tlv_calib_data *def_calib =
366 (const struct iwl_tlv_calib_data *)data;
367 u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
368 if (ucode_type >= IWL_UCODE_TYPE_MAX) {
369 IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
370 ucode_type);
371 return -EINVAL;
372 }
373 drv->fw.default_calib[ucode_type].flow_trigger =
374 def_calib->calib.flow_trigger;
375 drv->fw.default_calib[ucode_type].event_trigger =
376 def_calib->calib.event_trigger;
377
378 return 0;
379 }
380
381 static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
382 struct iwl_ucode_capabilities *capa)
383 {
384 const struct iwl_ucode_api *ucode_api = (const void *)data;
385 u32 api_index = le32_to_cpu(ucode_api->api_index);
386 u32 api_flags = le32_to_cpu(ucode_api->api_flags);
387 int i;
388
389 if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
390 IWL_WARN(drv,
391 "api flags index %d larger than supported by driver\n",
392 api_index);
393 return;
394 }
395
396 for (i = 0; i < 32; i++) {
397 if (api_flags & BIT(i))
398 __set_bit(i + 32 * api_index, capa->_api);
399 }
400 }
401
402 static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
403 struct iwl_ucode_capabilities *capa)
404 {
405 const struct iwl_ucode_capa *ucode_capa = (const void *)data;
406 u32 api_index = le32_to_cpu(ucode_capa->api_index);
407 u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
408 int i;
409
410 if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
411 IWL_WARN(drv,
412 "capa flags index %d larger than supported by driver\n",
413 api_index);
414 return;
415 }
416
417 for (i = 0; i < 32; i++) {
418 if (api_flags & BIT(i))
419 __set_bit(i + 32 * api_index, capa->_capa);
420 }
421 }
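/*
 * Example: a capability TLV with api_index 1 and bit 3 set in api_capa
 * marks capability number 35 (3 + 32 * 1) in capa->_capa; the API flags
 * TLV above is chunked into 32-bit words the same way.
 */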
422
423 static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
424 {
425 const char *name = drv->firmware_name;
426
427 if (strncmp(name, "iwlwifi-", 8) == 0)
428 name += 8;
429
430 return name;
431 }
432
433 static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
434 const struct firmware *ucode_raw,
435 struct iwl_firmware_pieces *pieces)
436 {
437 const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
438 u32 api_ver, hdr_size, build;
439 char buildstr[25];
440 const u8 *src;
441
442 drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
443 api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
444
445 switch (api_ver) {
446 default:
447 hdr_size = 28;
448 if (ucode_raw->size < hdr_size) {
449 IWL_ERR(drv, "File size too small!\n");
450 return -EINVAL;
451 }
452 build = le32_to_cpu(ucode->u.v2.build);
453 set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
454 le32_to_cpu(ucode->u.v2.inst_size));
455 set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
456 le32_to_cpu(ucode->u.v2.data_size));
457 set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
458 le32_to_cpu(ucode->u.v2.init_size));
459 set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
460 le32_to_cpu(ucode->u.v2.init_data_size));
461 src = ucode->u.v2.data;
462 break;
463 case 0:
464 case 1:
465 case 2:
466 hdr_size = 24;
467 if (ucode_raw->size < hdr_size) {
468 IWL_ERR(drv, "File size too small!\n");
469 return -EINVAL;
470 }
471 build = 0;
472 set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
473 le32_to_cpu(ucode->u.v1.inst_size));
474 set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
475 le32_to_cpu(ucode->u.v1.data_size));
476 set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
477 le32_to_cpu(ucode->u.v1.init_size));
478 set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
479 le32_to_cpu(ucode->u.v1.init_data_size));
480 src = ucode->u.v1.data;
481 break;
482 }
483
484 if (build)
485 sprintf(buildstr, " build %u", build);
486 else
487 buildstr[0] = '\0';
488
489 snprintf(drv->fw.fw_version,
490 sizeof(drv->fw.fw_version),
491 "%u.%u.%u.%u%s %s",
492 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
493 IWL_UCODE_MINOR(drv->fw.ucode_ver),
494 IWL_UCODE_API(drv->fw.ucode_ver),
495 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
496 buildstr, iwl_reduced_fw_name(drv));
497
498 /* Verify size of file vs. image size info in file's header */
499
500 if (ucode_raw->size != hdr_size +
501 get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
502 get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
503 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
504 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
505
506 IWL_ERR(drv,
507 "uCode file size %d does not match expected size\n",
508 (int)ucode_raw->size);
509 return -EINVAL;
510 }
511
512
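/*
 * Legacy v1/v2 images are laid out as: header, runtime instructions,
 * runtime data, init instructions, init data - all contiguous, which is
 * why a single 'src' cursor can simply walk the file below.
 */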
513 set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
514 src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
515 set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
516 IWLAGN_RTC_INST_LOWER_BOUND);
517 set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
518 src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
519 set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
520 IWLAGN_RTC_DATA_LOWER_BOUND);
521 set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
522 src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
523 set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
524 IWLAGN_RTC_INST_LOWER_BOUND);
525 set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
526 src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
527 set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
528 IWLAGN_RTC_DATA_LOWER_BOUND);
529 return 0;
530 }
531
532 static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
533 enum iwl_ucode_tlv_type tlv_type,
534 const void *tlv_data, u32 tlv_len)
535 {
536 const struct iwl_fw_dump_exclude *fw = tlv_data;
537 struct iwl_dump_exclude *excl;
538
539 if (tlv_len < sizeof(*fw))
540 return;
541
542 if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) {
543 excl = &drv->fw.dump_excl[0];
544
545 /* second time we find this, it's for WoWLAN */
546 if (excl->addr)
547 excl = &drv->fw.dump_excl_wowlan[0];
548 } else if (fw_has_capa(&drv->fw.ucode_capa,
549 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) {
550 /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */
551 excl = &drv->fw.dump_excl[0];
552 } else {
553 /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */
554 excl = &drv->fw.dump_excl_wowlan[0];
555 }
556
557 if (excl->addr)
558 excl++;
559
560 if (excl->addr) {
561 IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n");
562 return;
563 }
564
565 excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL;
566 excl->size = le32_to_cpu(fw->size);
567 }
568
569 static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
570 const struct iwl_ucode_tlv *tlv)
571 {
572 const struct iwl_fw_ini_region_tlv *region;
573 u32 length = le32_to_cpu(tlv->length);
574 u32 addr;
575
576 if (length < offsetof(typeof(*region), special_mem) +
577 sizeof(region->special_mem))
578 return;
579
580 region = (const void *)tlv->data;
581 addr = le32_to_cpu(region->special_mem.base_addr);
582 addr += le32_to_cpu(region->special_mem.offset);
583 addr &= ~FW_ADDR_CACHE_CONTROL;
584
585 if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY)
586 return;
587
588 switch (region->sub_type) {
589 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE:
590 drv->trans->dbg.umac_error_event_table = addr;
591 drv->trans->dbg.error_event_table_tlv_status |=
592 IWL_ERROR_EVENT_TABLE_UMAC;
593 break;
594 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE:
595 drv->trans->dbg.lmac_error_event_table[0] = addr;
596 drv->trans->dbg.error_event_table_tlv_status |=
597 IWL_ERROR_EVENT_TABLE_LMAC1;
598 break;
599 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE:
600 drv->trans->dbg.lmac_error_event_table[1] = addr;
601 drv->trans->dbg.error_event_table_tlv_status |=
602 IWL_ERROR_EVENT_TABLE_LMAC2;
603 break;
604 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE:
605 drv->trans->dbg.tcm_error_event_table[0] = addr;
606 drv->trans->dbg.error_event_table_tlv_status |=
607 IWL_ERROR_EVENT_TABLE_TCM1;
608 break;
609 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE:
610 drv->trans->dbg.tcm_error_event_table[1] = addr;
611 drv->trans->dbg.error_event_table_tlv_status |=
612 IWL_ERROR_EVENT_TABLE_TCM2;
613 break;
614 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE:
615 drv->trans->dbg.rcm_error_event_table[0] = addr;
616 drv->trans->dbg.error_event_table_tlv_status |=
617 IWL_ERROR_EVENT_TABLE_RCM1;
618 break;
619 case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE:
620 drv->trans->dbg.rcm_error_event_table[1] = addr;
621 drv->trans->dbg.error_event_table_tlv_status |=
622 IWL_ERROR_EVENT_TABLE_RCM2;
623 break;
624 default:
625 break;
626 }
627 }
628
629 static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
630 const struct firmware *ucode_raw,
631 struct iwl_firmware_pieces *pieces,
632 struct iwl_ucode_capabilities *capa,
633 bool *usniffer_images)
634 {
635 const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
636 const struct iwl_ucode_tlv *tlv;
637 size_t len = ucode_raw->size;
638 const u8 *data;
639 u32 tlv_len;
640 u32 usniffer_img;
641 enum iwl_ucode_tlv_type tlv_type;
642 const u8 *tlv_data;
643 char buildstr[25];
644 u32 build, paging_mem_size;
645 int num_of_cpus;
646 bool usniffer_req = false;
647
648 if (len < sizeof(*ucode)) {
649 IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
650 return -EINVAL;
651 }
652
653 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
654 IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
655 le32_to_cpu(ucode->magic));
656 return -EINVAL;
657 }
658
659 drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
660 memcpy(drv->fw.human_readable, ucode->human_readable,
661 sizeof(drv->fw.human_readable));
662 build = le32_to_cpu(ucode->build);
663
664 if (build)
665 sprintf(buildstr, " build %u", build);
666 else
667 buildstr[0] = '\0';
668
669 snprintf(drv->fw.fw_version,
670 sizeof(drv->fw.fw_version),
671 "%u.%u.%u.%u%s %s",
672 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
673 IWL_UCODE_MINOR(drv->fw.ucode_ver),
674 IWL_UCODE_API(drv->fw.ucode_ver),
675 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
676 buildstr, iwl_reduced_fw_name(drv));
677
678 data = ucode->data;
679
680 len -= sizeof(*ucode);
681
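/*
 * The remainder of the file is a stream of TLVs: each entry is a
 * struct iwl_ucode_tlv header (type + length) followed by 'length' bytes
 * of data, padded so that the next entry starts on a 4-byte boundary.
 */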
682 while (len >= sizeof(*tlv)) {
683 len -= sizeof(*tlv);
684
685 tlv = (const void *)data;
686 tlv_len = le32_to_cpu(tlv->length);
687 tlv_type = le32_to_cpu(tlv->type);
688 tlv_data = tlv->data;
689
690 if (len < tlv_len) {
691 IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
692 len, tlv_len);
693 return -EINVAL;
694 }
695 len -= ALIGN(tlv_len, 4);
696 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
697
698 switch (tlv_type) {
699 case IWL_UCODE_TLV_INST:
700 set_sec_data(pieces, IWL_UCODE_REGULAR,
701 IWL_UCODE_SECTION_INST, tlv_data);
702 set_sec_size(pieces, IWL_UCODE_REGULAR,
703 IWL_UCODE_SECTION_INST, tlv_len);
704 set_sec_offset(pieces, IWL_UCODE_REGULAR,
705 IWL_UCODE_SECTION_INST,
706 IWLAGN_RTC_INST_LOWER_BOUND);
707 break;
708 case IWL_UCODE_TLV_DATA:
709 set_sec_data(pieces, IWL_UCODE_REGULAR,
710 IWL_UCODE_SECTION_DATA, tlv_data);
711 set_sec_size(pieces, IWL_UCODE_REGULAR,
712 IWL_UCODE_SECTION_DATA, tlv_len);
713 set_sec_offset(pieces, IWL_UCODE_REGULAR,
714 IWL_UCODE_SECTION_DATA,
715 IWLAGN_RTC_DATA_LOWER_BOUND);
716 break;
717 case IWL_UCODE_TLV_INIT:
718 set_sec_data(pieces, IWL_UCODE_INIT,
719 IWL_UCODE_SECTION_INST, tlv_data);
720 set_sec_size(pieces, IWL_UCODE_INIT,
721 IWL_UCODE_SECTION_INST, tlv_len);
722 set_sec_offset(pieces, IWL_UCODE_INIT,
723 IWL_UCODE_SECTION_INST,
724 IWLAGN_RTC_INST_LOWER_BOUND);
725 break;
726 case IWL_UCODE_TLV_INIT_DATA:
727 set_sec_data(pieces, IWL_UCODE_INIT,
728 IWL_UCODE_SECTION_DATA, tlv_data);
729 set_sec_size(pieces, IWL_UCODE_INIT,
730 IWL_UCODE_SECTION_DATA, tlv_len);
731 set_sec_offset(pieces, IWL_UCODE_INIT,
732 IWL_UCODE_SECTION_DATA,
733 IWLAGN_RTC_DATA_LOWER_BOUND);
734 break;
735 case IWL_UCODE_TLV_BOOT:
736 IWL_ERR(drv, "Found unexpected BOOT ucode\n");
737 break;
738 case IWL_UCODE_TLV_PROBE_MAX_LEN:
739 if (tlv_len != sizeof(u32))
740 goto invalid_tlv_len;
741 capa->max_probe_length =
742 le32_to_cpup((const __le32 *)tlv_data);
743 break;
744 case IWL_UCODE_TLV_PAN:
745 if (tlv_len)
746 goto invalid_tlv_len;
747 capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
748 break;
749 case IWL_UCODE_TLV_FLAGS:
750 /* must be at least one u32 */
751 if (tlv_len < sizeof(u32))
752 goto invalid_tlv_len;
753 /* and a proper number of u32s */
754 if (tlv_len % sizeof(u32))
755 goto invalid_tlv_len;
756 /*
757 * This driver only reads the first u32 as
758 * right now no more features are defined,
759 * if that changes then either the driver
760 * will not work with the new firmware, or
761 * it'll not take advantage of new features.
762 */
763 capa->flags = le32_to_cpup((const __le32 *)tlv_data);
764 break;
765 case IWL_UCODE_TLV_API_CHANGES_SET:
766 if (tlv_len != sizeof(struct iwl_ucode_api))
767 goto invalid_tlv_len;
768 iwl_set_ucode_api_flags(drv, tlv_data, capa);
769 break;
770 case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
771 if (tlv_len != sizeof(struct iwl_ucode_capa))
772 goto invalid_tlv_len;
773 iwl_set_ucode_capabilities(drv, tlv_data, capa);
774 break;
775 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
776 if (tlv_len != sizeof(u32))
777 goto invalid_tlv_len;
778 pieces->init_evtlog_ptr =
779 le32_to_cpup((const __le32 *)tlv_data);
780 break;
781 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
782 if (tlv_len != sizeof(u32))
783 goto invalid_tlv_len;
784 pieces->init_evtlog_size =
785 le32_to_cpup((const __le32 *)tlv_data);
786 break;
787 case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
788 if (tlv_len != sizeof(u32))
789 goto invalid_tlv_len;
790 pieces->init_errlog_ptr =
791 le32_to_cpup((const __le32 *)tlv_data);
792 break;
793 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
794 if (tlv_len != sizeof(u32))
795 goto invalid_tlv_len;
796 pieces->inst_evtlog_ptr =
797 le32_to_cpup((const __le32 *)tlv_data);
798 break;
799 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
800 if (tlv_len != sizeof(u32))
801 goto invalid_tlv_len;
802 pieces->inst_evtlog_size =
803 le32_to_cpup((const __le32 *)tlv_data);
804 break;
805 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
806 if (tlv_len != sizeof(u32))
807 goto invalid_tlv_len;
808 pieces->inst_errlog_ptr =
809 le32_to_cpup((const __le32 *)tlv_data);
810 break;
811 case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
812 if (tlv_len)
813 goto invalid_tlv_len;
814 drv->fw.enhance_sensitivity_table = true;
815 break;
816 case IWL_UCODE_TLV_WOWLAN_INST:
817 set_sec_data(pieces, IWL_UCODE_WOWLAN,
818 IWL_UCODE_SECTION_INST, tlv_data);
819 set_sec_size(pieces, IWL_UCODE_WOWLAN,
820 IWL_UCODE_SECTION_INST, tlv_len);
821 set_sec_offset(pieces, IWL_UCODE_WOWLAN,
822 IWL_UCODE_SECTION_INST,
823 IWLAGN_RTC_INST_LOWER_BOUND);
824 break;
825 case IWL_UCODE_TLV_WOWLAN_DATA:
826 set_sec_data(pieces, IWL_UCODE_WOWLAN,
827 IWL_UCODE_SECTION_DATA, tlv_data);
828 set_sec_size(pieces, IWL_UCODE_WOWLAN,
829 IWL_UCODE_SECTION_DATA, tlv_len);
830 set_sec_offset(pieces, IWL_UCODE_WOWLAN,
831 IWL_UCODE_SECTION_DATA,
832 IWLAGN_RTC_DATA_LOWER_BOUND);
833 break;
834 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
835 if (tlv_len != sizeof(u32))
836 goto invalid_tlv_len;
837 capa->standard_phy_calibration_size =
838 le32_to_cpup((const __le32 *)tlv_data);
839 break;
840 case IWL_UCODE_TLV_SEC_RT:
841 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
842 tlv_len);
843 drv->fw.type = IWL_FW_MVM;
844 break;
845 case IWL_UCODE_TLV_SEC_INIT:
846 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
847 tlv_len);
848 drv->fw.type = IWL_FW_MVM;
849 break;
850 case IWL_UCODE_TLV_SEC_WOWLAN:
851 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
852 tlv_len);
853 drv->fw.type = IWL_FW_MVM;
854 break;
855 case IWL_UCODE_TLV_DEF_CALIB:
856 if (tlv_len != sizeof(struct iwl_tlv_calib_data))
857 goto invalid_tlv_len;
858 if (iwl_set_default_calib(drv, tlv_data))
859 goto tlv_error;
860 break;
861 case IWL_UCODE_TLV_PHY_SKU:
862 if (tlv_len != sizeof(u32))
863 goto invalid_tlv_len;
864 drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
865 drv->fw.valid_tx_ant = (drv->fw.phy_config &
866 FW_PHY_CFG_TX_CHAIN) >>
867 FW_PHY_CFG_TX_CHAIN_POS;
868 drv->fw.valid_rx_ant = (drv->fw.phy_config &
869 FW_PHY_CFG_RX_CHAIN) >>
870 FW_PHY_CFG_RX_CHAIN_POS;
871 break;
872 case IWL_UCODE_TLV_SECURE_SEC_RT:
873 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
874 tlv_len);
875 drv->fw.type = IWL_FW_MVM;
876 break;
877 case IWL_UCODE_TLV_SECURE_SEC_INIT:
878 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
879 tlv_len);
880 drv->fw.type = IWL_FW_MVM;
881 break;
882 case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
883 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
884 tlv_len);
885 drv->fw.type = IWL_FW_MVM;
886 break;
887 case IWL_UCODE_TLV_NUM_OF_CPU:
888 if (tlv_len != sizeof(u32))
889 goto invalid_tlv_len;
890 num_of_cpus =
891 le32_to_cpup((const __le32 *)tlv_data);
892
893 if (num_of_cpus == 2) {
894 drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
895 true;
896 drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
897 true;
898 drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
899 true;
900 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
901 IWL_ERR(drv, "Driver supports up to 2 CPUs\n");
902 return -EINVAL;
903 }
904 break;
905 case IWL_UCODE_TLV_N_SCAN_CHANNELS:
906 if (tlv_len != sizeof(u32))
907 goto invalid_tlv_len;
908 capa->n_scan_channels =
909 le32_to_cpup((const __le32 *)tlv_data);
910 break;
911 case IWL_UCODE_TLV_FW_VERSION: {
912 const __le32 *ptr = (const void *)tlv_data;
913 u32 major, minor;
914 u8 local_comp;
915
916 if (tlv_len != sizeof(u32) * 3)
917 goto invalid_tlv_len;
918
919 major = le32_to_cpup(ptr++);
920 minor = le32_to_cpup(ptr++);
921 local_comp = le32_to_cpup(ptr);
922
923 if (major >= 35)
924 snprintf(drv->fw.fw_version,
925 sizeof(drv->fw.fw_version),
926 "%u.%08x.%u %s", major, minor,
927 local_comp, iwl_reduced_fw_name(drv));
928 else
929 snprintf(drv->fw.fw_version,
930 sizeof(drv->fw.fw_version),
931 "%u.%u.%u %s", major, minor,
932 local_comp, iwl_reduced_fw_name(drv));
933 break;
934 }
935 case IWL_UCODE_TLV_FW_DBG_DEST: {
936 const struct iwl_fw_dbg_dest_tlv *dest = NULL;
937 const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
938 u8 mon_mode;
939
940 pieces->dbg_dest_ver = (const u8 *)tlv_data;
941 if (*pieces->dbg_dest_ver == 1) {
942 dest = (const void *)tlv_data;
943 } else if (*pieces->dbg_dest_ver == 0) {
944 dest_v1 = (const void *)tlv_data;
945 } else {
946 IWL_ERR(drv,
947 "The version is %d, and it is invalid\n",
948 *pieces->dbg_dest_ver);
949 break;
950 }
951
952 if (pieces->dbg_dest_tlv_init) {
953 IWL_ERR(drv,
954 "dbg destination ignored, already exists\n");
955 break;
956 }
957
958 pieces->dbg_dest_tlv_init = true;
959
960 if (dest_v1) {
961 pieces->dbg_dest_tlv_v1 = dest_v1;
962 mon_mode = dest_v1->monitor_mode;
963 } else {
964 pieces->dbg_dest_tlv = dest;
965 mon_mode = dest->monitor_mode;
966 }
967
968 IWL_INFO(drv, "Found debug destination: %s\n",
969 get_fw_dbg_mode_string(mon_mode));
970
971 drv->fw.dbg.n_dest_reg = (dest_v1) ?
972 tlv_len -
973 offsetof(struct iwl_fw_dbg_dest_tlv_v1,
974 reg_ops) :
975 tlv_len -
976 offsetof(struct iwl_fw_dbg_dest_tlv,
977 reg_ops);
978
979 drv->fw.dbg.n_dest_reg /=
980 sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
981
982 break;
983 }
984 case IWL_UCODE_TLV_FW_DBG_CONF: {
985 const struct iwl_fw_dbg_conf_tlv *conf =
986 (const void *)tlv_data;
987
988 if (!pieces->dbg_dest_tlv_init) {
989 IWL_ERR(drv,
990 "Ignore dbg config %d - no destination configured\n",
991 conf->id);
992 break;
993 }
994
995 if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
996 IWL_ERR(drv,
997 "Skip unknown configuration: %d\n",
998 conf->id);
999 break;
1000 }
1001
1002 if (pieces->dbg_conf_tlv[conf->id]) {
1003 IWL_ERR(drv,
1004 "Ignore duplicate dbg config %d\n",
1005 conf->id);
1006 break;
1007 }
1008
1009 if (conf->usniffer)
1010 usniffer_req = true;
1011
1012 IWL_INFO(drv, "Found debug configuration: %d\n",
1013 conf->id);
1014
1015 pieces->dbg_conf_tlv[conf->id] = conf;
1016 pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
1017 break;
1018 }
1019 case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
1020 const struct iwl_fw_dbg_trigger_tlv *trigger =
1021 (const void *)tlv_data;
1022 u32 trigger_id = le32_to_cpu(trigger->id);
1023
1024 if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
1025 IWL_ERR(drv,
1026 "Skip unknown trigger: %u\n",
1027 trigger->id);
1028 break;
1029 }
1030
1031 if (pieces->dbg_trigger_tlv[trigger_id]) {
1032 IWL_ERR(drv,
1033 "Ignore duplicate dbg trigger %u\n",
1034 trigger->id);
1035 break;
1036 }
1037
1038 IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
1039
1040 pieces->dbg_trigger_tlv[trigger_id] = trigger;
1041 pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
1042 break;
1043 }
1044 case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
1045 if (tlv_len != sizeof(u32)) {
1046 IWL_ERR(drv,
1047 "dbg lst mask size incorrect, skip\n");
1048 break;
1049 }
1050
1051 drv->fw.dbg.dump_mask =
1052 le32_to_cpup((const __le32 *)tlv_data);
1053 break;
1054 }
1055 case IWL_UCODE_TLV_SEC_RT_USNIFFER:
1056 *usniffer_images = true;
1057 iwl_store_ucode_sec(pieces, tlv_data,
1058 IWL_UCODE_REGULAR_USNIFFER,
1059 tlv_len);
1060 break;
1061 case IWL_UCODE_TLV_PAGING:
1062 if (tlv_len != sizeof(u32))
1063 goto invalid_tlv_len;
1064 paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
1065
1066 IWL_DEBUG_FW(drv,
1067 "Paging: paging enabled (size = %u bytes)\n",
1068 paging_mem_size);
1069
1070 if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
1071 IWL_ERR(drv,
1072 "Paging: driver supports up to %lu bytes for paging image\n",
1073 MAX_PAGING_IMAGE_SIZE);
1074 return -EINVAL;
1075 }
1076
1077 if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
1078 IWL_ERR(drv,
1079 "Paging: image isn't multiple %lu\n",
1080 FW_PAGING_SIZE);
1081 return -EINVAL;
1082 }
1083
1084 drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
1085 paging_mem_size;
1086 usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
1087 drv->fw.img[usniffer_img].paging_mem_size =
1088 paging_mem_size;
1089 break;
1090 case IWL_UCODE_TLV_FW_GSCAN_CAPA:
1091 /* ignored */
1092 break;
1093 case IWL_UCODE_TLV_FW_MEM_SEG: {
1094 const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
1095 (const void *)tlv_data;
1096 size_t size;
1097 struct iwl_fw_dbg_mem_seg_tlv *n;
1098
1099 if (tlv_len != (sizeof(*dbg_mem)))
1100 goto invalid_tlv_len;
1101
1102 IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
1103 dbg_mem->data_type);
1104
1105 size = sizeof(*pieces->dbg_mem_tlv) *
1106 (pieces->n_mem_tlv + 1);
1107 n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
1108 if (!n)
1109 return -ENOMEM;
1110 pieces->dbg_mem_tlv = n;
1111 pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
1112 pieces->n_mem_tlv++;
1113 break;
1114 }
1115 case IWL_UCODE_TLV_IML: {
1116 drv->fw.iml_len = tlv_len;
1117 drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL);
1118 if (!drv->fw.iml)
1119 return -ENOMEM;
1120 break;
1121 }
1122 case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
1123 const struct {
1124 __le32 buf_addr;
1125 __le32 buf_size;
1126 } *recov_info = (const void *)tlv_data;
1127
1128 if (tlv_len != sizeof(*recov_info))
1129 goto invalid_tlv_len;
1130 capa->error_log_addr =
1131 le32_to_cpu(recov_info->buf_addr);
1132 capa->error_log_size =
1133 le32_to_cpu(recov_info->buf_size);
1134 }
1135 break;
1136 case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
1137 const struct {
1138 u8 version[32];
1139 u8 sha1[20];
1140 } *fseq_ver = (const void *)tlv_data;
1141
1142 if (tlv_len != sizeof(*fseq_ver))
1143 goto invalid_tlv_len;
1144 IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
1145 fseq_ver->version);
1146 }
1147 break;
1148 case IWL_UCODE_TLV_FW_NUM_STATIONS:
1149 if (tlv_len != sizeof(u32))
1150 goto invalid_tlv_len;
1151 if (le32_to_cpup((const __le32 *)tlv_data) >
1152 IWL_MVM_STATION_COUNT_MAX) {
1153 IWL_ERR(drv,
1154 "%d is an invalid number of station\n",
1155 le32_to_cpup((const __le32 *)tlv_data));
1156 goto tlv_error;
1157 }
1158 capa->num_stations =
1159 le32_to_cpup((const __le32 *)tlv_data);
1160 break;
1161 case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1162 const struct iwl_umac_debug_addrs *dbg_ptrs =
1163 (const void *)tlv_data;
1164
1165 if (tlv_len != sizeof(*dbg_ptrs))
1166 goto invalid_tlv_len;
1167 if (drv->trans->trans_cfg->device_family <
1168 IWL_DEVICE_FAMILY_22000)
1169 break;
1170 drv->trans->dbg.umac_error_event_table =
1171 le32_to_cpu(dbg_ptrs->error_info_addr) &
1172 ~FW_ADDR_CACHE_CONTROL;
1173 drv->trans->dbg.error_event_table_tlv_status |=
1174 IWL_ERROR_EVENT_TABLE_UMAC;
1175 break;
1176 }
1177 case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1178 const struct iwl_lmac_debug_addrs *dbg_ptrs =
1179 (const void *)tlv_data;
1180
1181 if (tlv_len != sizeof(*dbg_ptrs))
1182 goto invalid_tlv_len;
1183 if (drv->trans->trans_cfg->device_family <
1184 IWL_DEVICE_FAMILY_22000)
1185 break;
1186 drv->trans->dbg.lmac_error_event_table[0] =
1187 le32_to_cpu(dbg_ptrs->error_event_table_ptr) &
1188 ~FW_ADDR_CACHE_CONTROL;
1189 drv->trans->dbg.error_event_table_tlv_status |=
1190 IWL_ERROR_EVENT_TABLE_LMAC1;
1191 break;
1192 }
1193 case IWL_UCODE_TLV_TYPE_REGIONS:
1194 iwl_parse_dbg_tlv_assert_tables(drv, tlv);
1195 fallthrough;
1196 case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
1197 case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1198 case IWL_UCODE_TLV_TYPE_HCMD:
1199 case IWL_UCODE_TLV_TYPE_TRIGGERS:
1200 case IWL_UCODE_TLV_TYPE_CONF_SET:
1201 if (iwlwifi_mod_params.enable_ini)
1202 iwl_dbg_tlv_alloc(drv->trans, tlv, false);
1203 break;
1204 case IWL_UCODE_TLV_CMD_VERSIONS:
1205 if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
1206 IWL_ERR(drv,
1207 "Invalid length for command versions: %u\n",
1208 tlv_len);
1209 tlv_len /= sizeof(struct iwl_fw_cmd_version);
1210 tlv_len *= sizeof(struct iwl_fw_cmd_version);
1211 }
1212 if (WARN_ON(capa->cmd_versions))
1213 return -EINVAL;
1214 capa->cmd_versions = kmemdup(tlv_data, tlv_len,
1215 GFP_KERNEL);
1216 if (!capa->cmd_versions)
1217 return -ENOMEM;
1218 capa->n_cmd_versions =
1219 tlv_len / sizeof(struct iwl_fw_cmd_version);
1220 break;
1221 case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION:
1222 if (drv->fw.phy_integration_ver) {
1223 IWL_ERR(drv,
1224 "phy integration str ignored, already exists\n");
1225 break;
1226 }
1227
1228 drv->fw.phy_integration_ver =
1229 kmemdup(tlv_data, tlv_len, GFP_KERNEL);
1230 if (!drv->fw.phy_integration_ver)
1231 return -ENOMEM;
1232 drv->fw.phy_integration_ver_len = tlv_len;
1233 break;
1234 case IWL_UCODE_TLV_SEC_TABLE_ADDR:
1235 case IWL_UCODE_TLV_D3_KEK_KCK_ADDR:
1236 iwl_drv_set_dump_exclude(drv, tlv_type,
1237 tlv_data, tlv_len);
1238 break;
1239 default:
1240 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
1241 break;
1242 }
1243 }
1244
1245 if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
1246 usniffer_req && !*usniffer_images) {
1247 IWL_ERR(drv,
1248 "user selected to work with usniffer but usniffer image isn't available in ucode package\n");
1249 return -EINVAL;
1250 }
1251
1252 if (len) {
1253 IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
1254 iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
1255 return -EINVAL;
1256 }
1257
1258 return 0;
1259
1260 invalid_tlv_len:
1261 IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
1262 tlv_error:
1263 iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
1264
1265 return -EINVAL;
1266 }
1267
1268 static int iwl_alloc_ucode(struct iwl_drv *drv,
1269 struct iwl_firmware_pieces *pieces,
1270 enum iwl_ucode_type type)
1271 {
1272 int i;
1273 struct fw_desc *sec;
1274
1275 sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
1276 if (!sec)
1277 return -ENOMEM;
1278 drv->fw.img[type].sec = sec;
1279 drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
1280
1281 for (i = 0; i < pieces->img[type].sec_counter; i++)
1282 if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
1283 return -ENOMEM;
1284
1285 return 0;
1286 }
1287
1288 static int validate_sec_sizes(struct iwl_drv *drv,
1289 struct iwl_firmware_pieces *pieces,
1290 const struct iwl_cfg *cfg)
1291 {
1292 IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
1293 get_sec_size(pieces, IWL_UCODE_REGULAR,
1294 IWL_UCODE_SECTION_INST));
1295 IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n",
1296 get_sec_size(pieces, IWL_UCODE_REGULAR,
1297 IWL_UCODE_SECTION_DATA));
1298 IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n",
1299 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
1300 IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n",
1301 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
1302
1303 /* Verify that uCode images will fit in card's SRAM. */
1304 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
1305 cfg->max_inst_size) {
1306 IWL_ERR(drv, "uCode instr len %zd too large to fit in\n",
1307 get_sec_size(pieces, IWL_UCODE_REGULAR,
1308 IWL_UCODE_SECTION_INST));
1309 return -1;
1310 }
1311
1312 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
1313 cfg->max_data_size) {
1314 IWL_ERR(drv, "uCode data len %zd too large to fit in\n",
1315 get_sec_size(pieces, IWL_UCODE_REGULAR,
1316 IWL_UCODE_SECTION_DATA));
1317 return -1;
1318 }
1319
1320 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
1321 cfg->max_inst_size) {
1322 IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n",
1323 get_sec_size(pieces, IWL_UCODE_INIT,
1324 IWL_UCODE_SECTION_INST));
1325 return -1;
1326 }
1327
1328 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
1329 cfg->max_data_size) {
1330 IWL_ERR(drv, "uCode init data len %zd too large to fit in\n",
1331 get_sec_size(pieces, IWL_UCODE_REGULAR,
1332 IWL_UCODE_SECTION_DATA));
1333 return -1;
1334 }
1335 return 0;
1336 }
1337
1338 static struct iwl_op_mode *
1339 _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
1340 {
1341 const struct iwl_op_mode_ops *ops = op->ops;
1342 struct dentry *dbgfs_dir = NULL;
1343 struct iwl_op_mode *op_mode = NULL;
1344 int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
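/* max_retry is 0 when fw_restart is disabled, else IWL_MAX_INIT_RETRY */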
1345
1346 for (retry = 0; retry <= max_retry; retry++) {
1347
1348 #ifdef CONFIG_IWLWIFI_DEBUGFS
1349 drv->dbgfs_op_mode = debugfs_create_dir(op->name,
1350 drv->dbgfs_drv);
1351 dbgfs_dir = drv->dbgfs_op_mode;
1352 #endif
1353
1354 op_mode = ops->start(drv->trans, drv->trans->cfg,
1355 &drv->fw, dbgfs_dir);
1356
1357 if (op_mode)
1358 return op_mode;
1359
1360 IWL_ERR(drv, "retry init count %d\n", retry);
1361
1362 #ifdef CONFIG_IWLWIFI_DEBUGFS
1363 debugfs_remove_recursive(drv->dbgfs_op_mode);
1364 drv->dbgfs_op_mode = NULL;
1365 #endif
1366 }
1367
1368 return NULL;
1369 }
1370
1371 static void _iwl_op_mode_stop(struct iwl_drv *drv)
1372 {
1373 /* op_mode can be NULL if its start failed */
1374 if (drv->op_mode) {
1375 iwl_op_mode_stop(drv->op_mode);
1376 drv->op_mode = NULL;
1377
1378 #ifdef CONFIG_IWLWIFI_DEBUGFS
1379 debugfs_remove_recursive(drv->dbgfs_op_mode);
1380 drv->dbgfs_op_mode = NULL;
1381 #endif
1382 }
1383 }
1384
1385 /*
1386 * iwl_req_fw_callback - callback when firmware was loaded
1387 *
1388 * If loaded successfully, copies the firmware into buffers
1389 * for the card to fetch (via DMA).
1390 */
1391 static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1392 {
1393 struct iwl_drv *drv = context;
1394 struct iwl_fw *fw = &drv->fw;
1395 const struct iwl_ucode_header *ucode;
1396 struct iwlwifi_opmode_table *op;
1397 int err;
1398 struct iwl_firmware_pieces *pieces;
1399 const unsigned int api_max = drv->trans->cfg->ucode_api_max;
1400 const unsigned int api_min = drv->trans->cfg->ucode_api_min;
1401 size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
1402 u32 api_ver;
1403 int i;
1404 bool load_module = false;
1405 bool usniffer_images = false;
1406 bool failure = true;
1407
1408 fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
1409 fw->ucode_capa.standard_phy_calibration_size =
1410 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1411 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
1412 fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
1413 /* dump all fw memory areas by default */
1414 fw->dbg.dump_mask = 0xffffffff;
1415
1416 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
1417 if (!pieces)
1418 goto out_free_fw;
1419
1420 if (!ucode_raw)
1421 goto try_again;
1422
1423 IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
1424 drv->firmware_name, ucode_raw->size);
1425
1426 /* Make sure that we got at least the API version number */
1427 if (ucode_raw->size < 4) {
1428 IWL_ERR(drv, "File size way too small!\n");
1429 goto try_again;
1430 }
1431
1432 /* Data from ucode file: header followed by uCode images */
1433 ucode = (const struct iwl_ucode_header *)ucode_raw->data;
1434
1435 if (ucode->ver)
1436 err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
1437 else
1438 err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
1439 &fw->ucode_capa, &usniffer_images);
1440
1441 if (err)
1442 goto try_again;
1443
1444 if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
1445 api_ver = drv->fw.ucode_ver;
1446 else
1447 api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
1448
1449 /*
1450 * api_ver should match the api version forming part of the
1451 * firmware filename ... but we don't check for that and only rely
1452 * on the API version read from firmware header from here on forward
1453 */
1454 if (api_ver < api_min || api_ver > api_max) {
1455 IWL_ERR(drv,
1456 "Driver unable to support your firmware API. "
1457 "Driver supports v%u, firmware is v%u.\n",
1458 api_max, api_ver);
1459 goto try_again;
1460 }
1461
1462 /*
1463 * In mvm uCode there is no difference between data and instructions
1464 * sections.
1465 */
1466 if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
1467 drv->trans->cfg))
1468 goto try_again;
1469
1470 /* Allocate ucode buffers for card's bus-master loading ... */
1471
1472 /* Runtime instructions and 2 copies of data:
1473 * 1) unmodified from disk
1474 * 2) backup cache for save/restore during power-downs
1475 */
1476 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
1477 if (iwl_alloc_ucode(drv, pieces, i))
1478 goto out_free_fw;
1479
1480 if (pieces->dbg_dest_tlv_init) {
1481 size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
1482 sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
1483 drv->fw.dbg.n_dest_reg;
1484
1485 drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
1486
1487 if (!drv->fw.dbg.dest_tlv)
1488 goto out_free_fw;
1489
1490 if (*pieces->dbg_dest_ver == 0) {
1491 memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
1492 dbg_dest_size);
1493 } else {
1494 struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
1495 drv->fw.dbg.dest_tlv;
1496
1497 dest_tlv->version = pieces->dbg_dest_tlv->version;
1498 dest_tlv->monitor_mode =
1499 pieces->dbg_dest_tlv->monitor_mode;
1500 dest_tlv->size_power =
1501 pieces->dbg_dest_tlv->size_power;
1502 dest_tlv->wrap_count =
1503 pieces->dbg_dest_tlv->wrap_count;
1504 dest_tlv->write_ptr_reg =
1505 pieces->dbg_dest_tlv->write_ptr_reg;
1506 dest_tlv->base_shift =
1507 pieces->dbg_dest_tlv->base_shift;
1508 memcpy(dest_tlv->reg_ops,
1509 pieces->dbg_dest_tlv->reg_ops,
1510 sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
1511 drv->fw.dbg.n_dest_reg);
1512
1513 /* In version 1 of the destination tlv, which is
1514 * relevant for the internal buffer exclusively,
1515 * the base address is given together with the
1516 * length of the buffer, and the size shift is given
1517 * instead of the end shift. We store these values in
1518 * base_reg and end_shift, and when dumping the data
1519 * we'll manipulate them to extract both the length
1520 * and the base address */
1521 dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
1522 dest_tlv->end_shift =
1523 pieces->dbg_dest_tlv->size_shift;
1524 }
1525 }
1526
1527 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
1528 if (pieces->dbg_conf_tlv[i]) {
1529 drv->fw.dbg.conf_tlv[i] =
1530 kmemdup(pieces->dbg_conf_tlv[i],
1531 pieces->dbg_conf_tlv_len[i],
1532 GFP_KERNEL);
1533 if (!drv->fw.dbg.conf_tlv[i])
1534 goto out_free_fw;
1535 }
1536 }
1537
1538 memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
1539
1540 trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
1541 sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
1542 trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
1543 trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
1544 sizeof(struct iwl_fw_dbg_trigger_cmd);
1545 trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
1546 sizeof(struct iwl_fw_dbg_trigger_mlme);
1547 trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
1548 sizeof(struct iwl_fw_dbg_trigger_stats);
1549 trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
1550 sizeof(struct iwl_fw_dbg_trigger_low_rssi);
1551 trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
1552 sizeof(struct iwl_fw_dbg_trigger_txq_timer);
1553 trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
1554 sizeof(struct iwl_fw_dbg_trigger_time_event);
1555 trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
1556 sizeof(struct iwl_fw_dbg_trigger_ba);
1557 trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
1558 sizeof(struct iwl_fw_dbg_trigger_tdls);
1559
1560 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
1561 if (pieces->dbg_trigger_tlv[i]) {
1562 /*
1563 * If the trigger isn't long enough, WARN and exit.
1564 * Someone is trying to debug something and he won't
1565 * be able to catch the bug he is trying to chase.
1566 * We'd better be noisy to be sure he knows what's
1567 * going on.
1568 */
1569 if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
1570 (trigger_tlv_sz[i] +
1571 sizeof(struct iwl_fw_dbg_trigger_tlv))))
1572 goto out_free_fw;
1573 drv->fw.dbg.trigger_tlv_len[i] =
1574 pieces->dbg_trigger_tlv_len[i];
1575 drv->fw.dbg.trigger_tlv[i] =
1576 kmemdup(pieces->dbg_trigger_tlv[i],
1577 drv->fw.dbg.trigger_tlv_len[i],
1578 GFP_KERNEL);
1579 if (!drv->fw.dbg.trigger_tlv[i])
1580 goto out_free_fw;
1581 }
1582 }
1583
1584 /* Now that we can no longer fail, copy information */
1585
1586 drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
1587 pieces->dbg_mem_tlv = NULL;
1588 drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;
1589
1590 /*
1591 * The (size - 16) / 12 formula is based on the information recorded
1592 * for each event, which is of mode 1 (including timestamp) for all
1593 * new microcodes that include this information.
1594 */
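/*
 * In other words (assuming the legacy event log layout): a 16-byte log
 * header followed by 12-byte entries - three u32s per mode-1 event,
 * timestamp included - so the entry count is (size - 16) / 12.
 */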
1595 fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
1596 if (pieces->init_evtlog_size)
1597 fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
1598 else
1599 fw->init_evtlog_size =
1600 drv->trans->trans_cfg->base_params->max_event_log_size;
1601 fw->init_errlog_ptr = pieces->init_errlog_ptr;
1602 fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
1603 if (pieces->inst_evtlog_size)
1604 fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
1605 else
1606 fw->inst_evtlog_size =
1607 drv->trans->trans_cfg->base_params->max_event_log_size;
1608 fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
1609
1610 /*
1611 * figure out the offset of chain noise reset and gain commands
1612 * based on the size of the standard phy calibration commands table
1613 */
1614 if (fw->ucode_capa.standard_phy_calibration_size >
1615 IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
1616 fw->ucode_capa.standard_phy_calibration_size =
1617 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1618
1619 /* We have our copies now, allow OS release its copies */
1620 release_firmware(ucode_raw);
1621
1622 iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
1623
1624 mutex_lock(&iwlwifi_opmode_table_mtx);
1625 switch (fw->type) {
1626 case IWL_FW_DVM:
1627 op = &iwlwifi_opmode_table[DVM_OP_MODE];
1628 break;
1629 default:
1630 WARN(1, "Invalid fw type %d\n", fw->type);
1631 fallthrough;
1632 case IWL_FW_MVM:
1633 op = &iwlwifi_opmode_table[MVM_OP_MODE];
1634 break;
1635 }
1636
1637 IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
1638 drv->fw.fw_version, op->name);
1639
1640 /* add this device to the list of devices using this op_mode */
1641 list_add_tail(&drv->list, &op->drv);
1642
1643 if (op->ops) {
1644 drv->op_mode = _iwl_op_mode_start(drv, op);
1645
1646 if (!drv->op_mode) {
1647 mutex_unlock(&iwlwifi_opmode_table_mtx);
1648 goto out_unbind;
1649 }
1650 } else {
1651 load_module = true;
1652 }
1653 mutex_unlock(&iwlwifi_opmode_table_mtx);
1654
1655 /*
1656 * Complete the firmware request last so that
1657 * a driver unbind (stop) doesn't run while we
1658 * are doing the start() above.
1659 */
1660 complete(&drv->request_firmware_complete);
1661
1662 /*
1663 * Load the module last so we don't block anything
1664 * else from proceeding if the module fails to load
1665 * or hangs loading.
1666 */
1667 if (load_module)
1668 request_module("%s", op->name);
1669 failure = false;
1670 goto free;
1671
1672 try_again:
1673 /* try next, if any */
1674 release_firmware(ucode_raw);
1675 if (iwl_request_firmware(drv, false))
1676 goto out_unbind;
1677 goto free;
1678
1679 out_free_fw:
1680 release_firmware(ucode_raw);
1681 out_unbind:
1682 complete(&drv->request_firmware_complete);
1683 device_release_driver(drv->trans->dev);
1684 /* drv has just been freed by the release */
1685 failure = false;
1686 free:
1687 if (failure)
1688 iwl_dealloc_ucode(drv);
1689
1690 if (pieces) {
1691 for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
1692 kfree(pieces->img[i].sec);
1693 kfree(pieces->dbg_mem_tlv);
1694 kfree(pieces);
1695 }
1696 }
1697
1698 struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
1699 {
1700 struct iwl_drv *drv;
1701 int ret;
1702
1703 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
1704 if (!drv) {
1705 ret = -ENOMEM;
1706 goto err;
1707 }
1708
1709 drv->trans = trans;
1710 drv->dev = trans->dev;
1711
1712 init_completion(&drv->request_firmware_complete);
1713 INIT_LIST_HEAD(&drv->list);
1714
1715 #ifdef CONFIG_IWLWIFI_DEBUGFS
1716 /* Create the device debugfs entries. */
1717 drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
1718 iwl_dbgfs_root);
1719
1720 /* Create transport layer debugfs dir */
1721 drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
1722 #endif
1723
1724 drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
1725
1726 ret = iwl_request_firmware(drv, true);
1727 if (ret) {
1728 IWL_ERR(trans, "Couldn't request the fw\n");
1729 goto err_fw;
1730 }
1731
1732 return drv;
1733
1734 err_fw:
1735 #ifdef CONFIG_IWLWIFI_DEBUGFS
1736 debugfs_remove_recursive(drv->dbgfs_drv);
1737 iwl_dbg_tlv_free(drv->trans);
1738 #endif
1739 kfree(drv);
1740 err:
1741 return ERR_PTR(ret);
1742 }
1743
1744 void iwl_drv_stop(struct iwl_drv *drv)
1745 {
1746 wait_for_completion(&drv->request_firmware_complete);
1747
1748 _iwl_op_mode_stop(drv);
1749
1750 iwl_dealloc_ucode(drv);
1751
1752 mutex_lock(&iwlwifi_opmode_table_mtx);
1753 /*
1754 * List is empty (this item wasn't added)
1755 * when firmware loading failed -- in that
1756 * case we can't remove it from any list.
1757 */
1758 if (!list_empty(&drv->list))
1759 list_del(&drv->list);
1760 mutex_unlock(&iwlwifi_opmode_table_mtx);
1761
1762 #ifdef CONFIG_IWLWIFI_DEBUGFS
1763 drv->trans->ops->debugfs_cleanup(drv->trans);
1764
1765 debugfs_remove_recursive(drv->dbgfs_drv);
1766 #endif
1767
1768 iwl_dbg_tlv_free(drv->trans);
1769
1770 kfree(drv);
1771 }
1772
1773 #define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
1774
1775 /* shared module parameters */
1776 struct iwl_mod_params iwlwifi_mod_params = {
1777 .fw_restart = true,
1778 .bt_coex_active = true,
1779 .power_level = IWL_POWER_INDEX_1,
1780 .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
1781 .enable_ini = ENABLE_INI,
1782 /* the rest are 0 by default */
1783 };
1784 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
1785
1786 int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1787 {
1788 int i;
1789 struct iwl_drv *drv;
1790 struct iwlwifi_opmode_table *op;
1791
1792 mutex_lock(&iwlwifi_opmode_table_mtx);
1793 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1794 op = &iwlwifi_opmode_table[i];
1795 if (strcmp(op->name, name))
1796 continue;
1797 op->ops = ops;
1798 /* TODO: need to handle exceptional case */
1799 list_for_each_entry(drv, &op->drv, list)
1800 drv->op_mode = _iwl_op_mode_start(drv, op);
1801
1802 mutex_unlock(&iwlwifi_opmode_table_mtx);
1803 return 0;
1804 }
1805 mutex_unlock(&iwlwifi_opmode_table_mtx);
1806 return -EIO;
1807 }
1808 IWL_EXPORT_SYMBOL(iwl_opmode_register);
1809
1810 void iwl_opmode_deregister(const char *name)
1811 {
1812 int i;
1813 struct iwl_drv *drv;
1814
1815 mutex_lock(&iwlwifi_opmode_table_mtx);
1816 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1817 if (strcmp(iwlwifi_opmode_table[i].name, name))
1818 continue;
1819 iwlwifi_opmode_table[i].ops = NULL;
1820
1821 /* call the stop routine for all devices */
1822 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1823 _iwl_op_mode_stop(drv);
1824
1825 mutex_unlock(&iwlwifi_opmode_table_mtx);
1826 return;
1827 }
1828 mutex_unlock(&iwlwifi_opmode_table_mtx);
1829 }
1830 IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
1831
1832 static int __init iwl_drv_init(void)
1833 {
1834 int i, err;
1835
1836 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1837 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1838
1839 pr_info(DRV_DESCRIPTION "\n");
1840
1841 #ifdef CONFIG_IWLWIFI_DEBUGFS
1842 /* Create the root of iwlwifi debugfs subsystem. */
1843 iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
1844 #endif
1845
1846 err = iwl_pci_register_driver();
1847 if (err)
1848 goto cleanup_debugfs;
1849
1850 return 0;
1851
1852 cleanup_debugfs:
1853 #ifdef CONFIG_IWLWIFI_DEBUGFS
1854 debugfs_remove_recursive(iwl_dbgfs_root);
1855 #endif
1856 return err;
1857 }
1858 module_init(iwl_drv_init);
1859
1860 static void __exit iwl_drv_exit(void)
1861 {
1862 iwl_pci_unregister_driver();
1863
1864 #ifdef CONFIG_IWLWIFI_DEBUGFS
1865 debugfs_remove_recursive(iwl_dbgfs_root);
1866 #endif
1867 }
1868 module_exit(iwl_drv_exit);
1869
1870 #ifdef CONFIG_IWLWIFI_DEBUG
1871 module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644);
1872 MODULE_PARM_DESC(debug, "debug output mask");
1873 #endif
1874
1875 module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444);
1876 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1877 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444);
1878 MODULE_PARM_DESC(11n_disable,
1879 "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
1880 module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
1881 MODULE_PARM_DESC(amsdu_size,
1882 "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, "
1883 "4K for other devices 1:4K 2:8K 3:12K (16K buffers) 4: 2K (default 0)");
1884 module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
1885 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
1886
1887 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
1888 MODULE_PARM_DESC(nvm_file, "NVM file name");
1889
1890 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
1891 MODULE_PARM_DESC(uapsd_disable,
1892 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
1893
1894 static int enable_ini_set(const char *arg, const struct kernel_param *kp)
1895 {
1896 int ret = 0;
1897 bool res;
1898 __u32 new_enable_ini;
1899
1900 /* in case the argument type is a number */
1901 ret = kstrtou32(arg, 0, &new_enable_ini);
1902 if (!ret) {
1903 if (new_enable_ini > ENABLE_INI) {
1904 pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
1905 return -EINVAL;
1906 }
1907 goto out;
1908 }
1909
1910 /* in case the argument type is boolean */
1911 ret = kstrtobool(arg, &res);
1912 if (ret)
1913 return ret;
1914 new_enable_ini = (res ? ENABLE_INI : 0);
1915
1916 out:
1917 iwlwifi_mod_params.enable_ini = new_enable_ini;
1918 return 0;
1919 }
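/*
 * Example: "enable_ini=7" selects FW debug preset 7, "enable_ini=16" (or a
 * boolean true) enables INI debugging without a preset, and "enable_ini=0"
 * (or false) disables it - matching the ENABLE_INI definition above and the
 * parameter description below.
 */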
1920
1921 static const struct kernel_param_ops enable_ini_ops = {
1922 .set = enable_ini_set
1923 };
1924
1925 module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
1926 MODULE_PARM_DESC(enable_ini,
1927 "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
1928 "Debug INI TLV FW debug infrastructure (default: 16)");
1929
1930 /*
1931 * If bt_coex_active is set to true, the uCode will do kill/defer
1932 * every time the priority line is asserted (BT is sending signals on the
1933 * priority line in the PCIx).
1934 * If bt_coex_active is set to false, the uCode will ignore BT activity and
1935 * perform normal operation.
1936 *
1937 * Users might experience transmit issues on some platforms due to WiFi/BT
1938 * co-existence problems. The typical symptom is being able to scan and
1939 * find all available APs, but not being able to associate with any of
1940 * them.
1941 * On those platforms, WiFi communication can be restored by setting the
1942 * "bt_coex_active" module parameter to "false".
1943 *
1944 * default: bt_coex_active = true (BT_COEX_ENABLE)
1945 */
1946 module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
1947 bool, 0444);
1948 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
1949
1950 module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444);
1951 MODULE_PARM_DESC(led_mode, "0=system default, "
1952 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1953
1954 module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444);
1955 MODULE_PARM_DESC(power_save,
1956 "enable WiFi power management (default: disable)");
1957
1958 module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
1959 MODULE_PARM_DESC(power_level,
1960 "default power save level (range from 1 - 5, default: 1)");
1961
1962 module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
1963 MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
1964
1965 module_param_named(remove_when_gone,
1966 iwlwifi_mod_params.remove_when_gone, bool,
1967 0444);
1968 MODULE_PARM_DESC(remove_when_gone,
1969 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
1970
1971 module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
1972 S_IRUGO);
1973 MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
1974