// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}
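
/*
 * Note: the scratch buffer in iwl_trans_pcie_dump_regs() above is allocated
 * with GFP_ATOMIC, presumably because this dump path can be reached from
 * error/interrupt context where sleeping is not allowed; on allocation
 * failure the dump is simply skipped.
 */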

static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
	else
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
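
/*
 * Worked example for the sizing logic above (derived from this code, not
 * from any FW documentation): the TLV value is an exponent relative to
 * 2^11 = 2 KiB, so a TLV value of 5 yields max_power = 5 + 11 = 16, i.e.
 * a 2^16 = 64 KiB buffer, while a TLV value of 0 selects the default
 * maximum of 2^26 = 64 MiB. On allocation failure,
 * iwl_pcie_alloc_fw_monitor_block() retries with progressively smaller
 * powers of two, down to 2^11 bytes.
 */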

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
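
/*
 * In the two shared-registers accessors above, the control word carries the
 * register offset in its low 16 bits and an operation code in its top
 * nibble; judging from this code alone, opcode 2 (2 << 28) requests a read
 * and opcode 3 (3 << 28) a write through the HEEP mailbox register pair.
 */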

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - it is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even
		 * though what follows is not really related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: we read the same register twice, discard
		 * the value, set a bit, and then read that same register
		 * twice again just to discard the value. But that's the way
		 * the hardware seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		msleep(100);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
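
/*
 * Rough worst-case wait budget of iwl_pcie_prepare_card_hw() above, from
 * the loop bounds alone: each of the 10 outer iterations polls until the
 * counter t reaches 150000 us (counted in 200 us steps, although each
 * usleep_range() may sleep up to 1000 us) and then adds a 25 ms msleep(),
 * so the function can spend on the order of a few seconds before giving up.
 */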

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume, reading back the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}
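
/*
 * Sketch of how IWL_CAUSE() expands, assuming (as the offsets above imply)
 * that FH causes occupy IVAR entries 0..15 and HW causes entries 16..31:
 * an FH cause whose mask is BIT(16) gets .bit = 16 and .addr = 16 - 16 = 0,
 * the first FH IVAR entry, while a HW cause with mask BIT(0) gets .bit = 0
 * and .addr = 0 + 16 = 16, the first HW IVAR entry. Any other mask_reg
 * yields the bogus 0xffff offset, whose truncation to u8 is what triggers
 * the overflow warning noted in the macro.
 */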

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
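	/*
	 * For example (assuming MSIX_NON_AUTO_CLEAR_CAUSE is BIT(7), as the
	 * nibble layout described above implies), an IVAR entry of 0x83
	 * binds the cause to vector 3 and marks it as not auto-cleared
	 * because that vector also serves other causes.
	 */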
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed, even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled; in that case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
					      UREG_DOORBELL_TO_ISR6_RESUME);
	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
				      CSR_IPC_SLEEP_CONTROL_RESUME);
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
	} else {
		return 0;
	}

	ret = wait_event_timeout(trans_pcie->sx_waitq,
				 trans_pcie->sx_complete, 2 * HZ);

	/* Invalidate it toward next suspend or resume */
	trans_pcie->sx_complete = false;

	if (!ret) {
		IWL_ERR(trans, "Timeout %s D3\n",
			suspend ? "entering" : "exiting");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	ret = iwl_pcie_d3_handshake(trans, true);
	if (ret)
		return ret;

	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		ret = 0;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE)
		ret = iwl_pcie_d3_handshake(trans, false);

	return ret;
}

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
1681 	if (num_irqs <= max_irqs - 2) {
1682 		trans_pcie->trans->num_rx_queues = num_irqs + 1;
1683 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1684 			IWL_SHARED_IRQ_FIRST_RSS;
1685 	} else if (num_irqs == max_irqs - 1) {
1686 		trans_pcie->trans->num_rx_queues = num_irqs;
1687 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1688 	} else {
1689 		trans_pcie->trans->num_rx_queues = num_irqs - 1;
1690 	}
1691 
1692 	IWL_DEBUG_INFO(trans,
1693 		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
1694 		       trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
1695 
1696 	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1697 
1698 	trans_pcie->alloc_vecs = num_irqs;
1699 	trans_pcie->msix_enabled = true;
1700 	return;
1701 
1702 enable_msi:
1703 	ret = pci_enable_msi(pdev);
1704 	if (ret) {
1705 		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1706 		/* enable rfkill interrupt: hw bug w/a */
1707 		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1708 		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1709 			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1710 			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1711 		}
1712 	}
1713 }
1714 
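/*
 * Spread the RX queue vectors across the online CPUs, one CPU per
 * vector; affinity hint failures are only logged, not fatal.
 */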
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}

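/*
 * Request a threaded handler for each allocated MSI-X vector; the
 * default vector gets the general handler, all others the RX handler.
 */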
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

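/*
 * On 9000/22000 family devices, clear the persistence bit left set by
 * a previous boot stage, unless WFPM write protection prevents it.
 */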
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

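/*
 * Force a power-gating cycle through the HPM registers and follow it
 * with a SW reset; used on integrated 22000 devices during start_hw.
 */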
static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

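/*
 * Bring the device up with trans_pcie->mutex held: prepare the card,
 * clear persistence, reset, init the APM and MSI-X, and finally enable
 * the RF-kill interrupt so the op_mode is kept updated from here on.
 */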
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	err = iwl_trans_pcie_sw_reset(trans, true);
	if (err)
		return err;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that... */
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	trans->txqs.page_offs = trans_cfg->cb_data_offs;
	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
	trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);

	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	if (trans_pcie->pnvm_dram.size)
		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
				  trans_pcie->pnvm_dram.block,
				  trans_pcie->pnvm_dram.physical);

	if (trans_pcie->reduce_power_dram.size)
		dma_free_coherent(trans->dev,
				  trans_pcie->reduce_power_dram.size,
				  trans_pcie->reduce_power_dram.block,
				  trans_pcie->reduce_power_dram.physical);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

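/*
 * If the device drops off the bus (all-ones register reads), a worker
 * is scheduled to detach it from the PCI core; see the nic-access
 * timeout handling below.
 */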
struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;
	struct work_struct work;
};

static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

	dev_err(&pdev->dev, "Device gone - attempting removal\n");
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
	pci_lock_rescan_remove();
	pci_dev_put(pdev);
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}

/*
 * This version doesn't disable BHs but rather assumes they're
 * already disabled.
 */
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
	u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		   CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
	u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;

	spin_lock(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
		mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
		poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
	}

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * get a module reference to avoid doing this
			 * while unloading anyway and to avoid
			 * scheduling a work with code that's being
			 * removed.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * we don't need to clear this flag, because
			 * the trans will be freed and reallocated.
			 */
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock(&trans_pcie->reg_lock);
		return false;
	}

out:
	/*
	 * Fool sparse by faking that we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
	bool ret;

	local_bh_disable();
	ret = __iwl_trans_pcie_grab_nic_access(trans);
	if (ret) {
		/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
		return ret;
	}
	local_bh_enable();
	return false;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
out:
	spin_unlock_bh(&trans_pcie->reg_lock);
}

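/*
 * Read @dwords dwords of device memory via the HBUS_TARG_MEM window,
 * which auto-increments after each read; the spinning under nic-access
 * is bounded to half a second before rescheduling.
 */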
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	int offs = 0;
	u32 *vals = buf;

	while (offs < dwords) {
		/* limit the time we spin here under lock to 1/2s */
		unsigned long end = jiffies + HZ / 2;
		bool resched = false;

		if (iwl_trans_grab_nic_access(trans)) {
			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
				    addr + 4 * offs);

			while (offs < dwords) {
				vals[offs] = iwl_read32(trans,
							HBUS_TARG_MEM_RDAT);
				offs++;

				if (time_after(jiffies, end)) {
					resched = true;
					break;
				}
			}
			iwl_trans_release_nic_access(trans);

			if (resched)
				cond_resched();
		} else {
			return -EBUSY;
		}
	}

	return 0;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
					u32 *val)
{
	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
				     ofs, val);
}

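/*
 * Block or unblock write-pointer updates for all TX queues except the
 * command queue; unblocking restores the queue's write pointer in HW.
 */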
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (i == trans->txqs.cmd.q_id)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
		return -EINVAL;

	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
	data->fr_bd_wid = 0;

	return 0;
}

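/*
 * Poll a TX queue until its read pointer catches up with the write
 * pointer (and the overflow queue drains), bounded by
 * IWL_FLUSH_WAIT_MS; returns -ETIMEDOUT if frames remain.
 */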
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	bool overflow_tx;
	u8 wr_ptr;

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!test_bit(txq_idx, trans->txqs.queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans->txqs.txq[txq_idx];

	spin_lock_bh(&txq->lock);
	overflow_tx = txq->overflow_tx ||
		      !skb_queue_empty(&txq->overflow_q);
	spin_unlock_bh(&txq->lock);

	wr_ptr = READ_ONCE(txq->write_ptr);

	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If write pointer moved during the wait, warn only
		 * if the TX came from op mode. In case TX came from
		 * trans layer (overflow TX) don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}

static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->reg_lock);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_bh(&trans_pcie->reg_lock);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	debugfs_create_file(#name, mode, parent, trans,			\
			    &iwl_dbgfs_##name##_ops);			\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

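/*
 * The tx_queue debugfs file is implemented as a seq_file iterating
 * over all HW queue slots, printing one line of state per queue.
 */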
struct iwl_dbgfs_tx_queue_priv {
	struct iwl_trans *trans;
};

struct iwl_dbgfs_tx_queue_state {
	loff_t pos;
};

static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->pos = *pos;
	return state;
}

static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
					 void *v, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;

	*pos = ++state->pos;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	return state;
}

static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;
	struct iwl_trans *trans = priv->trans;
	struct iwl_txq *txq = trans->txqs.txq[state->pos];

	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
		   (unsigned int)state->pos,
		   !!test_bit(state->pos, trans->txqs.queue_used),
		   !!test_bit(state->pos, trans->txqs.queue_stopped));
	if (txq)
		seq_printf(seq,
			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
			   txq->read_ptr, txq->write_ptr,
			   txq->need_update, txq->frozen,
			   txq->n_window, txq->ampdu);
	else
		seq_puts(seq, "(unallocated)");

	if (state->pos == trans->txqs.cmd.q_id)
		seq_puts(seq, " (HCMD)");
	seq_puts(seq, "\n");

	return 0;
}

static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
	.start = iwl_dbgfs_tx_queue_seq_start,
	.next = iwl_dbgfs_tx_queue_seq_next,
	.stop = iwl_dbgfs_tx_queue_seq_stop,
	.show = iwl_dbgfs_tx_queue_seq_show,
};

static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
{
	struct iwl_dbgfs_tx_queue_priv *priv;

	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
				  sizeof(*priv));

	if (!priv)
		return -ENOMEM;

	priv->trans = inode->i_private;
	return 0;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
								     rxq));
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 r & 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 reset_flag;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
	if (ret)
		return ret;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool new_value;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &new_value);
	if (ret)
		return ret;
	if (new_value == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 trans_pcie->debug_rfkill, new_value);
	trans_pcie->debug_rfkill = new_value;
	iwl_pcie_handle_rfkill_irq(trans);

	return count;
}

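/*
 * Only a single opener of monitor_data is allowed at a time, tracked
 * through the fw_mon_data.state CLOSED/OPEN/DISABLED state machine.
 */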
static int iwl_dbgfs_monitor_data_open(struct inode *inode,
				       struct file *file)
{
	struct iwl_trans *trans = inode->i_private;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->dbg.dest_tlv ||
	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
		return -ENOENT;
	}

	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;

	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return simple_open(inode, file);
}

static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}

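/*
 * Copy up to *size bytes to the user buffer, rounded down to a whole
 * number of dwords; returns true once the user buffer is full.
 */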
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	int buf_size_left = count - *bytes_copied;

	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}

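/*
 * Stream the DRAM firmware monitor to userspace by comparing the
 * device's current write pointer and wrap count against the position
 * recorded on the previous read, handling a single wrap in two chunks.
 */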
static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DWs */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}

static ssize_t iwl_dbgfs_rf_read(struct file *file,
				 char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->rf_name[0])
		return -ENODEV;

	return simple_read_from_buffer(user_buf, count, ppos,
				       trans_pcie->rf_name,
				       strlen(trans_pcie->rf_name));
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);

static const struct file_operations iwl_dbgfs_tx_queue_ops = {
	.owner = THIS_MODULE,
	.open = iwl_dbgfs_tx_queue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations iwl_dbgfs_monitor_data_ops = {
	.read = iwl_dbgfs_monitor_data_read,
	.open = iwl_dbgfs_monitor_data_open,
	.release = iwl_dbgfs_monitor_data_release,
};

/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
	DEBUGFS_ADD_FILE(csr, dir, 0200);
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
	DEBUGFS_ADD_FILE(rf, dir, 0400);
}

static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct cont_rec *data = &trans_pcie->fw_mon_data;

	mutex_lock(&data->mutex);
	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
	mutex_unlock(&data->mutex);
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}

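/*
 * Dump the receive buffers that the firmware has filled but the driver
 * has not yet processed, i.e. those between the queue's read pointer
 * and the closed-RB number reported in the status.
 */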
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = trans_pcie->rx_buf_bytes;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
					max_len, DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	if (!trans->trans_cfg->gen2)
		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
	else
		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      i));

	iwl_trans_release_nic_access(trans);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_umac_prph_no_grab(trans,
						       MON_DMARB_RD_DATA_ADDR);
	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans);

	return monitor_len;
}

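/*
 * Record the monitor base/write-pointer/wrap-count registers in the
 * dump header; the register addresses depend on the device family and
 * on whether a destination TLV is in use.
 */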
3141 static void
iwl_trans_pcie_dump_pointers(struct iwl_trans * trans,struct iwl_fw_error_dump_fw_mon * fw_mon_data)3142 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3143 			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3144 {
3145 	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3146 
3147 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3148 		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3149 		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3150 		write_ptr = DBGC_CUR_DBGBUF_STATUS;
3151 		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3152 	} else if (trans->dbg.dest_tlv) {
3153 		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3154 		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3155 		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3156 	} else {
3157 		base = MON_BUFF_BASE_ADDR;
3158 		write_ptr = MON_BUFF_WRPTR;
3159 		wrap_cnt = MON_BUFF_CYCLE_CNT;
3160 	}
3161 
3162 	write_ptr_val = iwl_read_prph(trans, write_ptr);
3163 	fw_mon_data->fw_mon_cycle_cnt =
3164 		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3165 	fw_mon_data->fw_mon_base_ptr =
3166 		cpu_to_le32(iwl_read_prph(trans, base));
3167 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3168 		fw_mon_data->fw_mon_base_high_ptr =
3169 			cpu_to_le32(iwl_read_prph(trans, base_high));
3170 		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3171 		/* convert wrtPtr to DWs, to align with all HWs */
3172 		write_ptr_val >>= 2;
3173 	}
3174 	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3175 }
3176 
3177 static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans * trans,struct iwl_fw_error_dump_data ** data,u32 monitor_len)3178 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3179 			    struct iwl_fw_error_dump_data **data,
3180 			    u32 monitor_len)
3181 {
3182 	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3183 	u32 len = 0;
3184 
3185 	if (trans->dbg.dest_tlv ||
3186 	    (fw_mon->size &&
3187 	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3188 	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3189 		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3190 
3191 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3192 		fw_mon_data = (void *)(*data)->data;
3193 
3194 		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3195 
3196 		len += sizeof(**data) + sizeof(*fw_mon_data);
3197 		if (fw_mon->size) {
3198 			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3199 			monitor_len = fw_mon->size;
3200 		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3201 			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3202 			/*
3203 			 * Update pointers to reflect actual values after
3204 			 * shifting
3205 			 */
3206 			if (trans->dbg.dest_tlv->version) {
3207 				base = (iwl_read_prph(trans, base) &
3208 					IWL_LDBG_M2S_BUF_BA_MSK) <<
3209 				       trans->dbg.dest_tlv->base_shift;
3210 				base *= IWL_M2S_UNIT_SIZE;
3211 				base += trans->cfg->smem_offset;
3212 			} else {
3213 				base = iwl_read_prph(trans, base) <<
3214 				       trans->dbg.dest_tlv->base_shift;
3215 			}
3216 
3217 			iwl_trans_read_mem(trans, base, fw_mon_data->data,
3218 					   monitor_len / sizeof(u32));
3219 		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3220 			monitor_len =
3221 				iwl_trans_pci_dump_marbh_monitor(trans,
3222 								 fw_mon_data,
3223 								 monitor_len);
3224 		} else {
3225 			/* Didn't match anything - output no monitor data */
3226 			monitor_len = 0;
3227 		}
3228 
3229 		len += monitor_len;
3230 		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3231 	}
3232 
3233 	return len;
3234 }
3235 
iwl_trans_get_fw_monitor_len(struct iwl_trans * trans,u32 * len)3236 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3237 {
3238 	if (trans->dbg.fw_mon.size) {
3239 		*len += sizeof(struct iwl_fw_error_dump_data) +
3240 			sizeof(struct iwl_fw_error_dump_fw_mon) +
3241 			trans->dbg.fw_mon.size;
3242 		return trans->dbg.fw_mon.size;
3243 	} else if (trans->dbg.dest_tlv) {
3244 		u32 base, end, cfg_reg, monitor_len;
3245 
3246 		if (trans->dbg.dest_tlv->version == 1) {
3247 			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3248 			cfg_reg = iwl_read_prph(trans, cfg_reg);
3249 			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3250 				trans->dbg.dest_tlv->base_shift;
3251 			base *= IWL_M2S_UNIT_SIZE;
3252 			base += trans->cfg->smem_offset;
3253 
3254 			monitor_len =
3255 				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3256 				trans->dbg.dest_tlv->end_shift;
3257 			monitor_len *= IWL_M2S_UNIT_SIZE;
3258 		} else {
3259 			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3260 			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3261 
3262 			base = iwl_read_prph(trans, base) <<
3263 			       trans->dbg.dest_tlv->base_shift;
3264 			end = iwl_read_prph(trans, end) <<
3265 			      trans->dbg.dest_tlv->end_shift;
3266 
3267 			/* Make "end" point to the actual end */
3268 			if (trans->trans_cfg->device_family >=
3269 			    IWL_DEVICE_FAMILY_8000 ||
3270 			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3271 				end += (1 << trans->dbg.dest_tlv->end_shift);
3272 			monitor_len = end - base;
3273 		}
3274 		*len += sizeof(struct iwl_fw_error_dump_data) +
3275 			sizeof(struct iwl_fw_error_dump_fw_mon) +
3276 			monitor_len;
3277 		return monitor_len;
3278 	}
3279 	return 0;
3280 }
3281 
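/*
 * Build the firmware error dump in two passes: first walk dump_mask and
 * compute an upper bound on the buffer size for every requested section
 * (host commands, firmware monitor, CSR/FH registers, RBs, paging), then
 * vzalloc() the buffer and fill the sections in the same order, stepping
 * through it with iwl_fw_error_next_data().
 */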
3282 static struct iwl_trans_dump_data *
3283 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3284 			 u32 dump_mask,
3285 			 const struct iwl_dump_sanitize_ops *sanitize_ops,
3286 			 void *sanitize_ctx)
3287 {
3288 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3289 	struct iwl_fw_error_dump_data *data;
3290 	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3291 	struct iwl_fw_error_dump_txcmd *txcmd;
3292 	struct iwl_trans_dump_data *dump_data;
3293 	u32 len, num_rbs = 0, monitor_len = 0;
3294 	int i, ptr;
3295 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3296 			!trans->trans_cfg->mq_rx_supported &&
3297 			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3298 
3299 	if (!dump_mask)
3300 		return NULL;
3301 
3302 	/* transport dump header */
3303 	len = sizeof(*dump_data);
3304 
3305 	/* host commands */
3306 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3307 		len += sizeof(*data) +
3308 			cmdq->n_window * (sizeof(*txcmd) +
3309 					  TFD_MAX_PAYLOAD_SIZE);
3310 
3311 	/* FW monitor */
3312 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3313 		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3314 
3315 	/* CSR registers */
3316 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3317 		len += sizeof(*data) + IWL_CSR_TO_DUMP;
3318 
3319 	/* FH registers */
3320 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3321 		if (trans->trans_cfg->gen2)
3322 			len += sizeof(*data) +
3323 			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3324 				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3325 		else
3326 			len += sizeof(*data) +
3327 			       (FH_MEM_UPPER_BOUND -
3328 				FH_MEM_LOWER_BOUND);
3329 	}
3330 
3331 	if (dump_rbs) {
3332 		/* Dumping RBs is supported only on pre-9000 devices (single RX queue) */
3333 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3334 		/* RBs */
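		/*
		 * Count how many RBs the device has closed (filled) beyond
		 * our read pointer: the closed RB status is a 12-bit value,
		 * so mask it and take the distance modulo the queue size.
		 */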
3335 		num_rbs =
3336 			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3337 			& 0x0FFF;
3338 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3339 		len += num_rbs * (sizeof(*data) +
3340 				  sizeof(struct iwl_fw_error_dump_rb) +
3341 				  (PAGE_SIZE << trans_pcie->rx_page_order));
3342 	}
3343 
3344 	/* Paged memory for gen2 HW */
3345 	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3346 		for (i = 0; i < trans->init_dram.paging_cnt; i++)
3347 			len += sizeof(*data) +
3348 			       sizeof(struct iwl_fw_error_dump_paging) +
3349 			       trans->init_dram.paging[i].size;
3350 
3351 	dump_data = vzalloc(len);
3352 	if (!dump_data)
3353 		return NULL;
3354 
3355 	len = 0;
3356 	data = (void *)dump_data->data;
3357 
3358 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3359 		u16 tfd_size = trans->txqs.tfd.size;
3360 
3361 		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3362 		txcmd = (void *)data->data;
3363 		spin_lock_bh(&cmdq->lock);
3364 		ptr = cmdq->write_ptr;
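		/*
		 * Walk backwards from the write pointer across the whole
		 * command queue window, capturing up to TFD_MAX_PAYLOAD_SIZE
		 * bytes of every host command still present in the queue.
		 */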
3365 		for (i = 0; i < cmdq->n_window; i++) {
3366 			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3367 			u8 tfdidx;
3368 			u32 caplen, cmdlen;
3369 
3370 			if (trans->trans_cfg->use_tfh)
3371 				tfdidx = idx;
3372 			else
3373 				tfdidx = ptr;
3374 
3375 			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3376 							   (u8 *)cmdq->tfds +
3377 							   tfd_size * tfdidx);
3378 			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3379 
3380 			if (cmdlen) {
3381 				len += sizeof(*txcmd) + caplen;
3382 				txcmd->cmdlen = cpu_to_le32(cmdlen);
3383 				txcmd->caplen = cpu_to_le32(caplen);
3384 				memcpy(txcmd->data, cmdq->entries[idx].cmd,
3385 				       caplen);
3386 				if (sanitize_ops && sanitize_ops->frob_hcmd)
3387 					sanitize_ops->frob_hcmd(sanitize_ctx,
3388 								txcmd->data,
3389 								caplen);
3390 				txcmd = (void *)((u8 *)txcmd->data + caplen);
3391 			}
3392 
3393 			ptr = iwl_txq_dec_wrap(trans, ptr);
3394 		}
3395 		spin_unlock_bh(&cmdq->lock);
3396 
3397 		data->len = cpu_to_le32(len);
3398 		len += sizeof(*data);
3399 		data = iwl_fw_error_next_data(data);
3400 	}
3401 
3402 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3403 		len += iwl_trans_pcie_dump_csr(trans, &data);
3404 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3405 		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3406 	if (dump_rbs)
3407 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3408 
3409 	/* Paged memory for gen2 HW */
3410 	if (trans->trans_cfg->gen2 &&
3411 	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3412 		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3413 			struct iwl_fw_error_dump_paging *paging;
3414 			u32 page_len = trans->init_dram.paging[i].size;
3415 
3416 			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3417 			data->len = cpu_to_le32(sizeof(*paging) + page_len);
3418 			paging = (void *)data->data;
3419 			paging->index = cpu_to_le32(i);
3420 			memcpy(paging->data,
3421 			       trans->init_dram.paging[i].block, page_len);
3422 			data = iwl_fw_error_next_data(data);
3423 
3424 			len += sizeof(*data) + sizeof(*paging) + page_len;
3425 		}
3426 	}
3427 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3428 		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3429 
3430 	dump_data->len = len;
3431 
3432 	return dump_data;
3433 }
3434 
3435 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
3436 {
3437 	if (enable)
3438 		iwl_enable_interrupts(trans);
3439 	else
3440 		iwl_disable_interrupts(trans);
3441 }
3442 
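/*
 * The interrupt-cause register, and the SW-error bit within it, differ
 * between MSI-X and legacy INTA delivery (and, with MSI-X, between device
 * families); pick the matching pair and hand it to the common NMI-sync
 * helper.
 */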
3443 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3444 {
3445 	u32 inta_addr, sw_err_bit;
3446 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3447 
3448 	if (trans_pcie->msix_enabled) {
3449 		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3450 		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
3451 			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
3452 		else
3453 			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3454 	} else {
3455 		inta_addr = CSR_INT;
3456 		sw_err_bit = CSR_INT_BIT_SW_ERR;
3457 	}
3458 
3459 	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
3460 }
3461 
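/*
 * Transport ops shared between the pre-gen2 and gen2 flavours; the two
 * iwl_trans_ops tables below only add the start/stop, host command and
 * TX paths that actually differ between the generations.
 */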
3462 #define IWL_TRANS_COMMON_OPS						\
3463 	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
3464 	.write8 = iwl_trans_pcie_write8,				\
3465 	.write32 = iwl_trans_pcie_write32,				\
3466 	.read32 = iwl_trans_pcie_read32,				\
3467 	.read_prph = iwl_trans_pcie_read_prph,				\
3468 	.write_prph = iwl_trans_pcie_write_prph,			\
3469 	.read_mem = iwl_trans_pcie_read_mem,				\
3470 	.write_mem = iwl_trans_pcie_write_mem,				\
3471 	.read_config32 = iwl_trans_pcie_read_config32,			\
3472 	.configure = iwl_trans_pcie_configure,				\
3473 	.set_pmi = iwl_trans_pcie_set_pmi,				\
3474 	.sw_reset = iwl_trans_pcie_sw_reset,				\
3475 	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
3476 	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
3477 	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
3478 	.dump_data = iwl_trans_pcie_dump_data,				\
3479 	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
3480 	.d3_resume = iwl_trans_pcie_d3_resume,				\
3481 	.interrupts = iwl_trans_pci_interrupts,				\
3482 	.sync_nmi = iwl_trans_pcie_sync_nmi,				\
3483 	.imr_dma_data = iwl_trans_pcie_copy_imr				\
3484 
3485 static const struct iwl_trans_ops trans_ops_pcie = {
3486 	IWL_TRANS_COMMON_OPS,
3487 	.start_hw = iwl_trans_pcie_start_hw,
3488 	.fw_alive = iwl_trans_pcie_fw_alive,
3489 	.start_fw = iwl_trans_pcie_start_fw,
3490 	.stop_device = iwl_trans_pcie_stop_device,
3491 
3492 	.send_cmd = iwl_pcie_enqueue_hcmd,
3493 
3494 	.tx = iwl_trans_pcie_tx,
3495 	.reclaim = iwl_txq_reclaim,
3496 
3497 	.txq_disable = iwl_trans_pcie_txq_disable,
3498 	.txq_enable = iwl_trans_pcie_txq_enable,
3499 
3500 	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3501 
3502 	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3503 
3504 	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
3505 	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3506 #ifdef CONFIG_IWLWIFI_DEBUGFS
3507 	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3508 #endif
3509 };
3510 
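/*
 * Gen2 devices use dynamically allocated TX queues and load their
 * firmware via the context info, hence the separate fw_alive/start_fw/
 * stop_device handlers plus the queue allocation and PNVM ops.
 */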
3511 static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3512 	IWL_TRANS_COMMON_OPS,
3513 	.start_hw = iwl_trans_pcie_start_hw,
3514 	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
3515 	.start_fw = iwl_trans_pcie_gen2_start_fw,
3516 	.stop_device = iwl_trans_pcie_gen2_stop_device,
3517 
3518 	.send_cmd = iwl_pcie_gen2_enqueue_hcmd,
3519 
3520 	.tx = iwl_txq_gen2_tx,
3521 	.reclaim = iwl_txq_reclaim,
3522 
3523 	.set_q_ptrs = iwl_txq_set_q_ptrs,
3524 
3525 	.txq_alloc = iwl_txq_dyn_alloc,
3526 	.txq_free = iwl_txq_dyn_free,
3527 	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3528 	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3529 	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3530 	.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
3531 #ifdef CONFIG_IWLWIFI_DEBUGFS
3532 	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3533 #endif
3534 };
3535 
3536 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3537 			       const struct pci_device_id *ent,
3538 			       const struct iwl_cfg_trans_params *cfg_trans)
3539 {
3540 	struct iwl_trans_pcie *trans_pcie;
3541 	struct iwl_trans *trans;
3542 	int ret, addr_size;
3543 	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3544 	void __iomem * const *table;
3545 
3546 	if (!cfg_trans->gen2)
3547 		ops = &trans_ops_pcie;
3548 
3549 	ret = pcim_enable_device(pdev);
3550 	if (ret)
3551 		return ERR_PTR(ret);
3552 
3553 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3554 				cfg_trans);
3555 	if (!trans)
3556 		return ERR_PTR(-ENOMEM);
3557 
3558 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3559 
3560 	trans_pcie->trans = trans;
3561 	trans_pcie->opmode_down = true;
3562 	spin_lock_init(&trans_pcie->irq_lock);
3563 	spin_lock_init(&trans_pcie->reg_lock);
3564 	spin_lock_init(&trans_pcie->alloc_page_lock);
3565 	mutex_init(&trans_pcie->mutex);
3566 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3567 	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3568 	init_waitqueue_head(&trans_pcie->imr_waitq);
3569 
3570 	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3571 						   WQ_HIGHPRI | WQ_UNBOUND, 1);
3572 	if (!trans_pcie->rba.alloc_wq) {
3573 		ret = -ENOMEM;
3574 		goto out_free_trans;
3575 	}
3576 	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3577 
3578 	trans_pcie->debug_rfkill = -1;
3579 
3580 	if (!cfg_trans->base_params->pcie_l1_allowed) {
3581 		/*
3582 		 * Workaround - seems to resolve erratic device behavior.
3583 		 * Note that this keeps the link from ever entering L1, which
3584 		 * wastes a lot of power; remove it once staying out of L1 is
3585 		 * no longer required.
3586 		 */
3586 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3587 				       PCIE_LINK_STATE_L1 |
3588 				       PCIE_LINK_STATE_CLKPM);
3589 	}
3590 
3591 	trans_pcie->def_rx_queue = 0;
3592 
3593 	pci_set_master(pdev);
3594 
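	/*
	 * Try a DMA mask matching the TFD address width first (wider than
	 * 32 bits on newer devices); if the platform can't handle it, fall
	 * back to a plain 32-bit mask before giving up.
	 */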
3595 	addr_size = trans->txqs.tfd.addr_size;
3596 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
3597 	if (ret) {
3598 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3599 		/* both attempts failed: */
3600 		if (ret) {
3601 			dev_err(&pdev->dev, "No suitable DMA available\n");
3602 			goto out_no_pci;
3603 		}
3604 	}
3605 
3606 	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3607 	if (ret) {
3608 		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3609 		goto out_no_pci;
3610 	}
3611 
3612 	table = pcim_iomap_table(pdev);
3613 	if (!table) {
3614 		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3615 		ret = -ENOMEM;
3616 		goto out_no_pci;
3617 	}
3618 
3619 	trans_pcie->hw_base = table[0];
3620 	if (!trans_pcie->hw_base) {
3621 		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
3622 		ret = -ENODEV;
3623 		goto out_no_pci;
3624 	}
3625 
3626 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
3627 	 * PCI Tx retries from interfering with C3 CPU state */
3628 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3629 
3630 	trans_pcie->pci_dev = pdev;
3631 	iwl_disable_interrupts(trans);
3632 
3633 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3634 	if (trans->hw_rev == 0xffffffff) {
3635 		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3636 		ret = -EIO;
3637 		goto out_no_pci;
3638 	}
3639 
3640 	/*
3641 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
3642 	 * changed, and the revision step now also includes bits 0-1 (there
3643 	 * is no more "dash" value). To keep hw_rev backwards compatible, we
3644 	 * store it in the old format.
3645 	 */
3646 	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3647 		trans->hw_rev_step = trans->hw_rev & 0xF;
3648 	else
3649 		trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
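	/*
	 * Worked example with an arbitrary value: if hw_rev reads 0xA6, the
	 * pre-8000 step ("dash") is (0xA6 & 0xC) >> 2 = 1, while on 8000 and
	 * later the step would be the low nibble, 0xA6 & 0xF = 6.
	 */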
3650 
3651 	IWL_DEBUG_INFO(trans, "HW REV: 0x%x\n", trans->hw_rev);
3652 
3653 	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3654 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3655 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3656 		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3657 
3658 	init_waitqueue_head(&trans_pcie->sx_waitq);
3659 
3661 	if (trans_pcie->msix_enabled) {
3662 		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3663 		if (ret)
3664 			goto out_no_pci;
3665 	} else {
3666 		ret = iwl_pcie_alloc_ict(trans);
3667 		if (ret)
3668 			goto out_no_pci;
3669 
3670 		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3671 						iwl_pcie_isr,
3672 						iwl_pcie_irq_handler,
3673 						IRQF_SHARED, DRV_NAME, trans);
3674 		if (ret) {
3675 			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3676 			goto out_free_ict;
3677 		}
3678 	}
3679 
3680 #ifdef CONFIG_IWLWIFI_DEBUGFS
3681 	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3682 	mutex_init(&trans_pcie->fw_mon_data.mutex);
3683 #endif
3684 
3685 	iwl_dbg_tlv_init(trans);
3686 
3687 	return trans;
3688 
3689 out_free_ict:
3690 	iwl_pcie_free_ict(trans);
3691 out_no_pci:
3692 	destroy_workqueue(trans_pcie->rba.alloc_wq);
3693 out_free_trans:
3694 	iwl_trans_free(trans);
3695 	return ERR_PTR(ret);
3696 }
3697 
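/*
 * Program the IMR DMA engine for a DRAM-to-SRAM copy: permanently halt
 * the UMAC first, then set the SRAM destination, the 64-bit DRAM source
 * address (split into LSB/MSB halves) and the byte count, and finally
 * kick channel 0 with its IRQ/completion control bits set.
 */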
3698 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
3699 				u32 dst_addr, u64 src_addr, u32 byte_cnt)
3700 {
3701 	iwl_write_prph(trans, IMR_UREG_CHICK,
3702 		       iwl_read_prph(trans, IMR_UREG_CHICK) |
3703 		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
3704 	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
3705 	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
3706 		       (u32)(src_addr & 0xFFFFFFFF));
3707 	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
3708 		       iwl_get_dma_hi_addr(src_addr));
3709 	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
3710 	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
3711 		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
3712 		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
3713 		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
3714 }
3715 
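/*
 * Synchronous wrapper around the FH copy above: mark a request as
 * pending, start the DMA and sleep on imr_waitq (up to 5 seconds) until
 * the status leaves IMR_D2S_REQUESTED - presumably flipped by the
 * interrupt handler once the completion IRQ arrives - treating a timeout
 * and IMR_D2S_ERROR alike as failure.
 */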
3716 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
3717 			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
3718 {
3719 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3720 	int ret;
3721 
3722 	trans_pcie->imr_status = IMR_D2S_REQUESTED;
3723 	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
3724 	ret = wait_event_timeout(trans_pcie->imr_waitq,
3725 				 trans_pcie->imr_status !=
3726 				 IMR_D2S_REQUESTED, 5 * HZ);
3727 	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
3728 		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
3729 		iwl_trans_pcie_dump_regs(trans);
3730 		return -ETIMEDOUT;
3731 	}
3732 	trans_pcie->imr_status = IMR_D2S_IDLE;
3733 	return 0;
3734 }
3735