/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

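/*
 * Mapping from TX queue number (the array index) to the hardware TX FIFO
 * and mac80211 AC it serves.  IWL_TX_FIFO_UNUSED marks a queue without a
 * FIFO and IWL_AC_UNSET marks a queue that does not back a mac80211 AC.
 */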
static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};

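/*
 * WiMAX coexistence priority table: one entry per coexistence event
 * (unassociated idle, scan, calibration, ...), copied verbatim into the
 * COEX_PRIORITY_TABLE_CMD in iwlagn_send_wimax_coex().
 */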
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
	 0, COEX_CALIBRATION_FLAGS},
	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
	 0, COEX_CONNECTION_ESTAB_FLAGS},
	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
	 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};

/*
 * ucode
 */
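/*
 * iwlagn_load_section - load one uCode section into device SRAM
 *
 * Programs the FH service channel DMA to copy the section from host DRAM
 * (image->p_addr, image->len bytes) to SRAM at dst_addr, then waits for
 * the transfer to complete.
 */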
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	priv->ucode_write_complete = 0;

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

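	/*
	 * The DMA runs asynchronously; priv->ucode_write_complete is set by
	 * the interrupt path once the service channel finishes, so wait (up
	 * to 5 seconds) for it below.
	 */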
	IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the %s uCode section due "
			"to interrupt\n", name);
		return ret;
	}
	if (!ret) {
		IWL_ERR(priv, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}

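/* Load the instruction and data sections of an init or runtime image. */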
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
		struct fw_desc *inst_image,
		struct fw_desc *data_image)
{
	int ret = 0;

	ret = iwlagn_load_section(priv, "INST", inst_image,
				   IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	return iwlagn_load_section(priv, "DATA", data_image,
				    IWLAGN_RTC_DATA_LOWER_BOUND);
}

int iwlagn_load_ucode(struct iwl_priv *priv)
{
	int ret = 0;

	/* check whether init ucode should be loaded, or rather runtime ucode */
	if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
		IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
		ret = iwlagn_load_given_ucode(priv,
			&priv->ucode_init, &priv->ucode_init_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
			priv->ucode_type = UCODE_INIT;
		}
	} else {
		IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
			"Loading runtime ucode...\n");
		ret = iwlagn_load_given_ucode(priv,
			&priv->ucode_code, &priv->ucode_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
			priv->ucode_type = UCODE_RT;
		}
	}

	return ret;
}

/*
 *  Calibration
 */
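/*
 * Build the crystal frequency calibration command from the EEPROM crystal
 * values; the result is stored in priv->calib_results and is sent to the
 * runtime uCode later by iwl_send_calib_results().
 */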
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	__le16 *xtal_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
			     (u8 *)&cmd, sizeof(cmd));
}

static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
	struct iwl_calib_temperature_offset_cmd cmd;
	__le16 *offset_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
	if (!(cmd.radio_sensor_offset))
		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
	cmd.reserved = 0;
	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
			cmd.radio_sensor_offset);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
			     (u8 *)&cmd, sizeof(cmd));
}

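/*
 * Ask the init uCode to run every init-time calibration and to report the
 * results: IWL_CALIB_INIT_CFG_ALL enables, starts and requests results for
 * all calibrations.
 */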
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
	struct iwl_calib_cfg_cmd calib_cfg_cmd;
	struct iwl_host_cmd cmd = {
		.id = CALIBRATION_CFG_CMD,
		.len = sizeof(struct iwl_calib_cfg_cmd),
		.data = &calib_cfg_cmd,
	};

	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;

	return iwl_send_cmd(priv, &cmd);
}

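/*
 * Notification handler for calibration results from the init uCode; each
 * result is stored in priv->calib_results so it can be replayed to the
 * runtime uCode by iwl_send_calib_results().
 */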
void iwlagn_rx_calib_result(struct iwl_priv *priv,
			     struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	int index;

	/* subtract the size of the length field itself */
	len -= 4;

	/* Define the order in which the results will be sent to the runtime
	 * uCode. iwl_send_calib_results() sends them in order of their index,
	 * so sort them here.
	 */
	switch (hdr->op_code) {
	case IWL_PHY_CALIBRATE_DC_CMD:
		index = IWL_CALIB_DC;
		break;
	case IWL_PHY_CALIBRATE_LO_CMD:
		index = IWL_CALIB_LO;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
		index = IWL_CALIB_TX_IQ;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
		index = IWL_CALIB_TX_IQ_PERD;
		break;
	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
		index = IWL_CALIB_BASE_BAND;
		break;
	default:
		IWL_ERR(priv, "Unknown calibration notification %d\n",
			  hdr->op_code);
		return;
	}
	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}

void iwlagn_rx_calib_complete(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
	queue_work(priv->workqueue, &priv->restart);
}

void iwlagn_init_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	/* The "initialize" uCode was loaded; verify its instruction image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if the code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}


	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
		goto restart;
	}

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/*
		 * Tell the uCode that we are ready to perform calibration;
		 * this must happen before any calibration.  There is no need
		 * to close the envelope since we are going to load the
		 * runtime uCode later.
		 */
		iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
			BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
	}
	iwlagn_send_calib_cfg(priv);

	/*
	 * temperature offset calibration is only needed for runtime ucode,
	 * so prepare the value now.
	 */
	if (priv->cfg->need_temp_offset_calib)
		iwlagn_set_temperature_offset_calib(priv);

	return;

restart:
	/* real restart (first load init_ucode) */
	queue_work(priv->workqueue, &priv->restart);
}

static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	if (priv->cfg->base_params->support_wimax_coexist) {
		/* UnMask wake up src at associated sleep */
		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;

		/* UnMask wake up src at unassociated sleep */
		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
		memcpy(coex_cmd.sta_prio, cu_priorities,
			sizeof(struct iwl_wimax_coex_event_entry) *
			 COEX_NUM_OF_EVENTS);

		/* enabling the coexistence feature */
		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;

		/* enabling the priorities tables */
		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
	} else {
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}
	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
				sizeof(coex_cmd), &coex_cmd);
}

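/*
 * BT coexistence priority table: each entry packs a priority level and a
 * shared-antenna flag for one BT_COEX_PRIO_TBL_EVT_* event; the table is
 * sent to the uCode by iwlagn_send_prio_tbl().
 */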
static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};

void iwlagn_send_prio_tbl(struct iwl_priv *priv)
{
	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;

	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
		sizeof(iwlagn_bt_prio_tbl));
	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
		IWL_ERR(priv, "failed to send BT prio tbl command\n");
}

void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
	struct iwl_bt_coex_prot_env_cmd env_cmd;

	env_cmd.action = action;
	env_cmd.type = type;
	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
			     sizeof(env_cmd), &env_cmd))
		IWL_ERR(priv, "failed to send BT env command\n");
}

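/*
 * Called after the uCode reports ALIVE: program the TX scheduler SRAM and
 * the FH TX DMA channels, map TX queues to FIFOs, and send the coexistence
 * and calibration configuration.
 */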
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwlagn_send_wimax_coex(priv);

	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}

/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart.  If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image,
				      u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWLAGN_RTC_INST_LOWER_BOUND);
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}

/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 *     looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWLAGN_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
		    "ucode image in INSTRUCTION memory is good\n");

	return ret;
}

/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 */
int iwl_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl_verify_inst_full(priv, image, len);

	return ret;
}