1 /******************************************************************************
2  *
3  * GPL LICENSE SUMMARY
4  *
5  * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of version 2 of the GNU General Public License as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19  * USA
20  *
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29 
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
34 
35 #include "iwl-dev.h"
36 #include "iwl-core.h"
37 #include "iwl-sta.h"
38 #include "iwl-io.h"
39 #include "iwl-helpers.h"
40 #include "iwl-agn-hw.h"
41 #include "iwl-agn.h"
42 
43 /*
44  * mac80211 queues, ACs, hardware queues, FIFOs.
45  *
46  * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47  *
48  * Mac80211 uses the following numbers, which we get from it
49  * by way of skb_get_queue_mapping(skb):
50  *
51  *	VO	0
52  *	VI	1
53  *	BE	2
54  *	BK	3
55  *
56  *
57  * Regular (not A-MPDU) frames are put into hardware queues corresponding
58  * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59  * own queue per aggregation session (RA/TID combination), such queues are
60  * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61  * order to map frames to the right queue, we also need an AC->hw queue
62  * mapping. This is implemented here.
63  *
64  * Due to the way hw queues are set up (by the hw specific modules like
65  * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
66  * mapping.
67  */
68 
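/*
 * TID -> AC lookup for TIDs 0-7 (the usual 802.11 UP-to-AC mapping);
 * TIDs 8-15 are not handled, see get_ac_from_tid() below.
 */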
69 static const u8 tid_to_ac[] = {
70 	IEEE80211_AC_BE,
71 	IEEE80211_AC_BK,
72 	IEEE80211_AC_BK,
73 	IEEE80211_AC_BE,
74 	IEEE80211_AC_VI,
75 	IEEE80211_AC_VI,
76 	IEEE80211_AC_VO,
77 	IEEE80211_AC_VO
78 };
79 
80 static inline int get_ac_from_tid(u16 tid)
81 {
82 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 		return tid_to_ac[tid];
84 
85 	/* no support for TIDs 8-15 yet */
86 	return -EINVAL;
87 }
88 
89 static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
90 {
91 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
92 		return ctx->ac_to_fifo[tid_to_ac[tid]];
93 
94 	/* no support for TIDs 8-15 yet */
95 	return -EINVAL;
96 }
97 
98 /**
99  * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
100  */
101 void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
102 					    struct iwl_tx_queue *txq,
103 					    u16 byte_cnt)
104 {
105 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
106 	int write_ptr = txq->q.write_ptr;
107 	int txq_id = txq->q.id;
108 	u8 sec_ctl = 0;
109 	u8 sta_id = 0;
110 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
111 	__le16 bc_ent;
112 
113 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
114 
115 	if (txq_id != priv->cmd_queue) {
116 		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
117 		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
118 
119 		switch (sec_ctl & TX_CMD_SEC_MSK) {
120 		case TX_CMD_SEC_CCM:
121 			len += CCMP_MIC_LEN;
122 			break;
123 		case TX_CMD_SEC_TKIP:
124 			len += TKIP_ICV_LEN;
125 			break;
126 		case TX_CMD_SEC_WEP:
127 			len += WEP_IV_LEN + WEP_ICV_LEN;
128 			break;
129 		}
130 	}
131 
132 	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
133 
134 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
135 
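	/* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored beyond
	 * TFD_QUEUE_SIZE_MAX, presumably so the scheduler can read a
	 * contiguous window of byte counts across the queue wrap. */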
136 	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
137 		scd_bc_tbl[txq_id].
138 			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
139 }
140 
141 void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
142 					   struct iwl_tx_queue *txq)
143 {
144 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
145 	int txq_id = txq->q.id;
146 	int read_ptr = txq->q.read_ptr;
147 	u8 sta_id = 0;
148 	__le16 bc_ent;
149 
150 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
151 
152 	if (txq_id != priv->cmd_queue)
153 		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
154 
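	/* Reset the reclaimed entry to a minimal byte count of 1, keeping
	 * the station id in the upper bits of the entry. */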
155 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
156 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
157 
158 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
159 		scd_bc_tbl[txq_id].
160 			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
161 }
162 
163 static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
164 					u16 txq_id)
165 {
166 	u32 tbl_dw_addr;
167 	u32 tbl_dw;
168 	u16 scd_q2ratid;
169 
170 	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
171 
172 	tbl_dw_addr = priv->scd_base_addr +
173 			IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
174 
175 	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
176 
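	/* Each translation-table dword holds the RA/TID for two queues:
	 * the even-numbered queue in the low 16 bits, the odd one in the
	 * high 16 bits. */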
177 	if (txq_id & 0x1)
178 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
179 	else
180 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
181 
182 	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
183 
184 	return 0;
185 }
186 
187 static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
188 {
189 	/* Simply stop the queue, but don't change any configuration;
190 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
191 	iwl_write_prph(priv,
192 		IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
193 		(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
194 		(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
195 }
196 
197 void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
198 				int txq_id, u32 index)
199 {
200 	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
201 			(index & 0xff) | (txq_id << 8));
202 	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
203 }
204 
205 void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
206 					struct iwl_tx_queue *txq,
207 					int tx_fifo_id, int scd_retry)
208 {
209 	int txq_id = txq->q.id;
210 	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
211 
212 	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
213 			(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
214 			(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
215 			(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
216 			IWLAGN_SCD_QUEUE_STTS_REG_MSK);
217 
218 	txq->sched_retry = scd_retry;
219 
220 	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
221 		       active ? "Activate" : "Deactivate",
222 		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
223 }
224 
225 int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
226 			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
227 {
228 	unsigned long flags;
229 	u16 ra_tid;
230 	int ret;
231 
232 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
233 	    (IWLAGN_FIRST_AMPDU_QUEUE +
234 		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
235 		IWL_WARN(priv,
236 			"queue number out of range: %d, must be %d to %d\n",
237 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
238 			IWLAGN_FIRST_AMPDU_QUEUE +
239 			priv->cfg->base_params->num_of_ampdu_queues - 1);
240 		return -EINVAL;
241 	}
242 
243 	ra_tid = BUILD_RAxTID(sta_id, tid);
244 
245 	/* Modify device's station table to Tx this TID */
246 	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
247 	if (ret)
248 		return ret;
249 
250 	spin_lock_irqsave(&priv->lock, flags);
251 
252 	/* Stop this Tx queue before configuring it */
253 	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
254 
255 	/* Map receiver-address / traffic-ID to this queue */
256 	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
257 
258 	/* Set this queue as a chain-building queue */
259 	iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
260 
261 	/* enable aggregations for the queue */
262 	iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
263 
264 	/* Place first TFD at index corresponding to start sequence number.
265 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
266 	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
267 	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
268 	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
269 
270 	/* Set up Tx window size and frame limit for this queue */
271 	iwl_write_targ_mem(priv, priv->scd_base_addr +
272 			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
273 			sizeof(u32),
274 			((SCD_WIN_SIZE <<
275 			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
276 			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
277 			((SCD_FRAME_LIMIT <<
278 			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
279 			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
280 
281 	iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
282 
283 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
284 	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
285 
286 	spin_unlock_irqrestore(&priv->lock, flags);
287 
288 	return 0;
289 }
290 
291 int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
292 			   u16 ssn_idx, u8 tx_fifo)
293 {
294 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
295 	    (IWLAGN_FIRST_AMPDU_QUEUE +
296 		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
297 		IWL_ERR(priv,
298 			"queue number out of range: %d, must be %d to %d\n",
299 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
300 			IWLAGN_FIRST_AMPDU_QUEUE +
301 			priv->cfg->base_params->num_of_ampdu_queues - 1);
302 		return -EINVAL;
303 	}
304 
305 	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
306 
307 	iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
308 
309 	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
310 	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
311 	/* Assumes that ssn_idx is valid (!= 0xFFF) */
312 	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
313 
314 	iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
315 	iwl_txq_ctx_deactivate(priv, txq_id);
316 	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
317 
318 	return 0;
319 }
320 
321 /*
322  * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
323  * must be called under priv->lock and mac access
324  */
325 void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
326 {
327 	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
328 }
329 
330 /*
331  * handle build REPLY_TX command notification.
332  */
333 static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
334 					struct sk_buff *skb,
335 					struct iwl_tx_cmd *tx_cmd,
336 					struct ieee80211_tx_info *info,
337 					struct ieee80211_hdr *hdr,
338 					u8 std_id)
339 {
340 	__le16 fc = hdr->frame_control;
341 	__le32 tx_flags = tx_cmd->tx_flags;
342 
343 	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
344 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
345 		tx_flags |= TX_CMD_FLG_ACK_MSK;
346 		if (ieee80211_is_mgmt(fc))
347 			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
348 		if (ieee80211_is_probe_resp(fc) &&
349 		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
350 			tx_flags |= TX_CMD_FLG_TSF_MSK;
351 	} else {
352 		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
353 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
354 	}
355 
356 	if (ieee80211_is_back_req(fc))
357 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
358 	else if (info->band == IEEE80211_BAND_2GHZ &&
359 		 priv->cfg->bt_params &&
360 		 priv->cfg->bt_params->advanced_bt_coexist &&
361 		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
362 		 ieee80211_is_reassoc_req(fc) ||
363 		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
364 		tx_flags |= TX_CMD_FLG_IGNORE_BT;
365 
366 
367 	tx_cmd->sta_id = std_id;
368 	if (ieee80211_has_morefrags(fc))
369 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
370 
371 	if (ieee80211_is_data_qos(fc)) {
372 		u8 *qc = ieee80211_get_qos_ctl(hdr);
373 		tx_cmd->tid_tspec = qc[0] & 0xf;
374 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
375 	} else {
376 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
377 	}
378 
379 	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
380 
381 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
382 	if (ieee80211_is_mgmt(fc)) {
383 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
384 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
385 		else
386 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
387 	} else {
388 		tx_cmd->timeout.pm_frame_timeout = 0;
389 	}
390 
391 	tx_cmd->driver_txop = 0;
392 	tx_cmd->tx_flags = tx_flags;
393 	tx_cmd->next_frame_len = 0;
394 }
395 
396 #define RTS_DFAULT_RETRY_LIMIT		60
397 
398 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
399 			      struct iwl_tx_cmd *tx_cmd,
400 			      struct ieee80211_tx_info *info,
401 			      __le16 fc)
402 {
403 	u32 rate_flags;
404 	int rate_idx;
405 	u8 rts_retry_limit;
406 	u8 data_retry_limit;
407 	u8 rate_plcp;
408 
409 	/* Set retry limit on DATA packets and Probe Responses */
410 	if (ieee80211_is_probe_resp(fc))
411 		data_retry_limit = 3;
412 	else
413 		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
414 	tx_cmd->data_retry_limit = data_retry_limit;
415 
416 	/* Set retry limit on RTS packets */
417 	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
418 	if (data_retry_limit < rts_retry_limit)
419 		rts_retry_limit = data_retry_limit;
420 	tx_cmd->rts_retry_limit = rts_retry_limit;
421 
422 	/* DATA packets will use the uCode station table for rate/antenna
423 	 * selection */
424 	if (ieee80211_is_data(fc)) {
425 		tx_cmd->initial_rate_index = 0;
426 		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
427 		return;
428 	}
429 
430 	/**
431 	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
432 	 * not really a TX rate.  Thus, we use the lowest supported rate for
433 	 * this band.  Also use the lowest supported rate if the stored rate
434 	 * index is invalid.
435 	 */
436 	rate_idx = info->control.rates[0].idx;
437 	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
438 			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
439 		rate_idx = rate_lowest_index(&priv->bands[info->band],
440 				info->control.sta);
441 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
442 	if (info->band == IEEE80211_BAND_5GHZ)
443 		rate_idx += IWL_FIRST_OFDM_RATE;
444 	/* Get PLCP rate for tx_cmd->rate_n_flags */
445 	rate_plcp = iwl_rates[rate_idx].plcp;
446 	/* Zero out flags for this packet */
447 	rate_flags = 0;
448 
449 	/* Set CCK flag as needed */
450 	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
451 		rate_flags |= RATE_MCS_CCK_MSK;
452 
453 	/* Set up antennas */
454 	 if (priv->cfg->bt_params &&
455 	     priv->cfg->bt_params->advanced_bt_coexist &&
456 	     priv->bt_full_concurrent) {
457 		/* operated as 1x1 in full concurrency mode */
458 		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
459 				first_antenna(priv->hw_params.valid_tx_ant));
460 	} else
461 		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
462 					      priv->hw_params.valid_tx_ant);
463 	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
464 
465 	/* Set the rate in the TX cmd */
466 	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
467 }
468 
469 static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
470 				      struct ieee80211_tx_info *info,
471 				      struct iwl_tx_cmd *tx_cmd,
472 				      struct sk_buff *skb_frag,
473 				      int sta_id)
474 {
475 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
476 
477 	switch (keyconf->cipher) {
478 	case WLAN_CIPHER_SUITE_CCMP:
479 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
480 		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
481 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
482 			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
483 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
484 		break;
485 
486 	case WLAN_CIPHER_SUITE_TKIP:
487 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
488 		ieee80211_get_tkip_key(keyconf, skb_frag,
489 			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
490 		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
491 		break;
492 
493 	case WLAN_CIPHER_SUITE_WEP104:
494 		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
495 		/* fall through */
496 	case WLAN_CIPHER_SUITE_WEP40:
497 		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
498 			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
499 
500 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
501 
502 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
503 			     "with key %d\n", keyconf->keyidx);
504 		break;
505 
506 	default:
507 		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
508 		break;
509 	}
510 }
511 
512 /*
513  * start REPLY_TX command process
514  */
515 int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
516 {
517 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
518 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
519 	struct ieee80211_sta *sta = info->control.sta;
520 	struct iwl_station_priv *sta_priv = NULL;
521 	struct iwl_tx_queue *txq;
522 	struct iwl_queue *q;
523 	struct iwl_device_cmd *out_cmd;
524 	struct iwl_cmd_meta *out_meta;
525 	struct iwl_tx_cmd *tx_cmd;
526 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
527 	int txq_id;
528 	dma_addr_t phys_addr;
529 	dma_addr_t txcmd_phys;
530 	dma_addr_t scratch_phys;
531 	u16 len, firstlen, secondlen;
532 	u16 seq_number = 0;
533 	__le16 fc;
534 	u8 hdr_len;
535 	u8 sta_id;
536 	u8 wait_write_ptr = 0;
537 	u8 tid = 0;
538 	u8 *qc = NULL;
539 	unsigned long flags;
540 	bool is_agg = false;
541 
542 	/*
543 	 * If the frame needs to go out off-channel, then
544 	 * we'll have put the PAN context to that channel,
545 	 * so make the frame go out there.
546 	 */
547 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
548 		ctx = &priv->contexts[IWL_RXON_CTX_PAN];
549 	else if (info->control.vif)
550 		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
551 
552 	spin_lock_irqsave(&priv->lock, flags);
553 	if (iwl_is_rfkill(priv)) {
554 		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
555 		goto drop_unlock;
556 	}
557 
558 	fc = hdr->frame_control;
559 
560 #ifdef CONFIG_IWLWIFI_DEBUG
561 	if (ieee80211_is_auth(fc))
562 		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
563 	else if (ieee80211_is_assoc_req(fc))
564 		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
565 	else if (ieee80211_is_reassoc_req(fc))
566 		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
567 #endif
568 
569 	hdr_len = ieee80211_hdrlen(fc);
570 
571 	/* For management frames use the broadcast id so as not to break aggregation */
572 	if (!ieee80211_is_data(fc))
573 		sta_id = ctx->bcast_sta_id;
574 	else {
575 		/* Find index into station table for destination station */
576 		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
577 		if (sta_id == IWL_INVALID_STATION) {
578 			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
579 				       hdr->addr1);
580 			goto drop_unlock;
581 		}
582 	}
583 
584 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
585 
586 	if (sta)
587 		sta_priv = (void *)sta->drv_priv;
588 
589 	if (sta_priv && sta_priv->asleep &&
590 	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
591 		/*
592 		 * This sends an asynchronous command to the device,
593 		 * but we can rely on it being processed before the
594 		 * next frame is processed -- and the next frame to
595 		 * this station is the one that will consume this
596 		 * counter.
597 		 * For now set the counter to just 1 since we do not
598 		 * support uAPSD yet.
599 		 */
600 		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
601 	}
602 
603 	/*
604 	 * Send this frame after DTIM -- there's a special queue
605 	 * reserved for this for contexts that support AP mode.
606 	 */
607 	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
608 		txq_id = ctx->mcast_queue;
609 		/*
610 		 * The microcode will clear the more data
611 		 * bit in the last frame it transmits.
612 		 */
613 		hdr->frame_control |=
614 			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
615 	} else
616 		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
617 
618 	/* irqs already disabled/saved above when locking priv->lock */
619 	spin_lock(&priv->sta_lock);
620 
621 	if (ieee80211_is_data_qos(fc)) {
622 		qc = ieee80211_get_qos_ctl(hdr);
623 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
624 		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
625 			spin_unlock(&priv->sta_lock);
626 			goto drop_unlock;
627 		}
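		/* The driver maintains the 802.11 sequence counter per sta/tid
		 * for QoS data frames and stamps it into the MAC header below. */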
628 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
629 		seq_number &= IEEE80211_SCTL_SEQ;
630 		hdr->seq_ctrl = hdr->seq_ctrl &
631 				cpu_to_le16(IEEE80211_SCTL_FRAG);
632 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
633 		seq_number += 0x10;
634 		/* aggregation is on for this <sta,tid> */
635 		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
636 		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
637 			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
638 			is_agg = true;
639 		}
640 	}
641 
642 	txq = &priv->txq[txq_id];
643 	q = &txq->q;
644 
645 	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
646 		spin_unlock(&priv->sta_lock);
647 		goto drop_unlock;
648 	}
649 
650 	if (ieee80211_is_data_qos(fc)) {
651 		priv->stations[sta_id].tid[tid].tfds_in_queue++;
652 		if (!ieee80211_has_morefrags(fc))
653 			priv->stations[sta_id].tid[tid].seq_number = seq_number;
654 	}
655 
656 	spin_unlock(&priv->sta_lock);
657 
658 	/* Set up driver data for this TFD */
659 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
660 	txq->txb[q->write_ptr].skb = skb;
661 	txq->txb[q->write_ptr].ctx = ctx;
662 
663 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
664 	out_cmd = txq->cmd[q->write_ptr];
665 	out_meta = &txq->meta[q->write_ptr];
666 	tx_cmd = &out_cmd->cmd.tx;
667 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
668 	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
669 
670 	/*
671 	 * Set up the Tx-command (not MAC!) header.
672 	 * Store the chosen Tx queue and TFD index within the sequence field;
673 	 * after Tx, uCode's Tx response will return this value so driver can
674 	 * locate the frame within the tx queue and do post-tx processing.
675 	 */
676 	out_cmd->hdr.cmd = REPLY_TX;
677 	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
678 				INDEX_TO_SEQ(q->write_ptr)));
679 
680 	/* Copy MAC header from skb into command buffer */
681 	memcpy(tx_cmd->hdr, hdr, hdr_len);
682 
683 
684 	/* Total # bytes to be transmitted */
685 	len = (u16)skb->len;
686 	tx_cmd->len = cpu_to_le16(len);
687 
688 	if (info->control.hw_key)
689 		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
690 
691 	/* TODO need this for burst mode later on */
692 	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
693 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
694 
695 	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
696 
697 	iwl_update_stats(priv, true, fc, len);
698 	/*
699 	 * Use the first empty entry in this queue's command buffer array
700 	 * to contain the Tx command and MAC header concatenated together
701 	 * (payload data will be in another buffer).
702 	 * Size of this varies, due to varying MAC header length.
703 	 * If end is not dword aligned, we'll have 2 extra bytes at the end
704 	 * of the MAC header (device reads on dword boundaries).
705 	 * We'll tell device about this padding later.
706 	 */
707 	len = sizeof(struct iwl_tx_cmd) +
708 		sizeof(struct iwl_cmd_header) + hdr_len;
709 	firstlen = (len + 3) & ~3;
710 
711 	/* Tell NIC about any 2-byte padding after MAC header */
712 	if (firstlen != len)
713 		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
714 
715 	/* Physical address of this Tx command's header (not MAC header!),
716 	 * within command buffer array. */
717 	txcmd_phys = pci_map_single(priv->pci_dev,
718 				    &out_cmd->hdr, firstlen,
719 				    PCI_DMA_BIDIRECTIONAL);
720 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
721 	dma_unmap_len_set(out_meta, len, firstlen);
722 	/* Add buffer containing Tx command and MAC(!) header to TFD's
723 	 * first entry */
724 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
725 						   txcmd_phys, firstlen, 1, 0);
726 
727 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
728 		txq->need_update = 1;
729 	} else {
730 		wait_write_ptr = 1;
731 		txq->need_update = 0;
732 	}
733 
734 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
735 	 * if any (802.11 null frames have no payload). */
736 	secondlen = skb->len - hdr_len;
737 	if (secondlen > 0) {
738 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
739 					   secondlen, PCI_DMA_TODEVICE);
740 		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
741 							   phys_addr, secondlen,
742 							   0, 0);
743 	}
744 
745 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
746 				offsetof(struct iwl_tx_cmd, scratch);
747 
748 	/* take back ownership of DMA buffer to enable update */
749 	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
750 				    firstlen, PCI_DMA_BIDIRECTIONAL);
751 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
752 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
753 
754 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
755 		     le16_to_cpu(out_cmd->hdr.sequence));
756 	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
757 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
758 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
759 
760 	/* Set up entry for this TFD in Tx byte-count array */
761 	if (info->flags & IEEE80211_TX_CTL_AMPDU)
762 		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
763 						     le16_to_cpu(tx_cmd->len));
764 
765 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
766 				       firstlen, PCI_DMA_BIDIRECTIONAL);
767 
768 	trace_iwlwifi_dev_tx(priv,
769 			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
770 			     sizeof(struct iwl_tfd),
771 			     &out_cmd->hdr, firstlen,
772 			     skb->data + hdr_len, secondlen);
773 
774 	/* Tell device the write index *just past* this latest filled TFD */
775 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
776 	iwl_txq_update_write_ptr(priv, txq);
777 	spin_unlock_irqrestore(&priv->lock, flags);
778 
779 	/*
780 	 * At this point the frame is "transmitted" successfully
781 	 * and we will get a TX status notification eventually,
782 	 * regardless of the value of ret. "ret" only indicates
783 	 * whether or not we should update the write pointer.
784 	 */
785 
786 	/*
787 	 * Avoid atomic ops if it isn't an associated client.
788 	 * Also, if this is a packet for aggregation, don't
789 	 * increase the counter because the ucode will stop
790 	 * aggregation queues when their respective station
791 	 * goes to sleep.
792 	 */
793 	if (sta_priv && sta_priv->client && !is_agg)
794 		atomic_inc(&sta_priv->pending_frames);
795 
796 	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
797 		if (wait_write_ptr) {
798 			spin_lock_irqsave(&priv->lock, flags);
799 			txq->need_update = 1;
800 			iwl_txq_update_write_ptr(priv, txq);
801 			spin_unlock_irqrestore(&priv->lock, flags);
802 		} else {
803 			iwl_stop_queue(priv, txq);
804 		}
805 	}
806 
807 	return 0;
808 
809 drop_unlock:
810 	spin_unlock_irqrestore(&priv->lock, flags);
811 	return -1;
812 }
813 
814 static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
815 				    struct iwl_dma_ptr *ptr, size_t size)
816 {
817 	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
818 				       GFP_KERNEL);
819 	if (!ptr->addr)
820 		return -ENOMEM;
821 	ptr->size = size;
822 	return 0;
823 }
824 
825 static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
826 				    struct iwl_dma_ptr *ptr)
827 {
828 	if (unlikely(!ptr->addr))
829 		return;
830 
831 	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
832 	memset(ptr, 0, sizeof(*ptr));
833 }
834 
835 /**
836  * iwlagn_hw_txq_ctx_free - Free TXQ Context
837  *
838  * Destroy all TX DMA queues and structures
839  */
840 void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
841 {
842 	int txq_id;
843 
844 	/* Tx queues */
845 	if (priv->txq) {
846 		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
847 			if (txq_id == priv->cmd_queue)
848 				iwl_cmd_queue_free(priv);
849 			else
850 				iwl_tx_queue_free(priv, txq_id);
851 	}
852 	iwlagn_free_dma_ptr(priv, &priv->kw);
853 
854 	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
855 
856 	/* free tx queue structure */
857 	iwl_free_txq_mem(priv);
858 }
859 
860 /**
861  * iwlagn_txq_ctx_alloc - allocate TX queue context
862  * Allocate all Tx DMA structures and initialize them
863  *
864  * @param priv
865  * @return error code
866  */
867 int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
868 {
869 	int ret;
870 	int txq_id, slots_num;
871 	unsigned long flags;
872 
873 	/* Free all tx/cmd queues and keep-warm buffer */
874 	iwlagn_hw_txq_ctx_free(priv);
875 
876 	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
877 				priv->hw_params.scd_bc_tbls_size);
878 	if (ret) {
879 		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
880 		goto error_bc_tbls;
881 	}
882 	/* Alloc keep-warm buffer */
883 	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
884 	if (ret) {
885 		IWL_ERR(priv, "Keep Warm allocation failed\n");
886 		goto error_kw;
887 	}
888 
889 	/* allocate tx queue structure */
890 	ret = iwl_alloc_txq_mem(priv);
891 	if (ret)
892 		goto error;
893 
894 	spin_lock_irqsave(&priv->lock, flags);
895 
896 	/* Turn off all Tx DMA fifos */
897 	priv->cfg->ops->lib->txq_set_sched(priv, 0);
898 
899 	/* Tell NIC where to find the "keep warm" buffer */
900 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
901 
902 	spin_unlock_irqrestore(&priv->lock, flags);
903 
904 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
905 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
906 		slots_num = (txq_id == priv->cmd_queue) ?
907 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
908 		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
909 				       txq_id);
910 		if (ret) {
911 			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
912 			goto error;
913 		}
914 	}
915 
916 	return ret;
917 
918  error:
919 	iwlagn_hw_txq_ctx_free(priv);
920 	iwlagn_free_dma_ptr(priv, &priv->kw);
921  error_kw:
922 	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
923  error_bc_tbls:
924 	return ret;
925 }
926 
927 void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
928 {
929 	int txq_id, slots_num;
930 	unsigned long flags;
931 
932 	spin_lock_irqsave(&priv->lock, flags);
933 
934 	/* Turn off all Tx DMA fifos */
935 	priv->cfg->ops->lib->txq_set_sched(priv, 0);
936 
937 	/* Tell NIC where to find the "keep warm" buffer */
938 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
939 
940 	spin_unlock_irqrestore(&priv->lock, flags);
941 
942 	/* Alloc and init all Tx queues, including the command queue (#4) */
943 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
944 		slots_num = txq_id == priv->cmd_queue ?
945 			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
946 		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
947 	}
948 }
949 
950 /**
951  * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
952  */
953 void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
954 {
955 	int ch, txq_id;
956 	unsigned long flags;
957 
958 	/* Turn off all Tx DMA fifos */
959 	spin_lock_irqsave(&priv->lock, flags);
960 
961 	priv->cfg->ops->lib->txq_set_sched(priv, 0);
962 
963 	/* Stop each Tx DMA channel, and wait for it to be idle */
964 	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
965 		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
966 		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
967 				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
968 				    1000))
969 			IWL_ERR(priv, "Failing on timeout while stopping"
970 			    " DMA channel %d [0x%08x]", ch,
971 			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
972 	}
973 	spin_unlock_irqrestore(&priv->lock, flags);
974 
975 	if (!priv->txq)
976 		return;
977 
978 	/* Unmap DMA from host system and free skb's */
979 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
980 		if (txq_id == priv->cmd_queue)
981 			iwl_cmd_queue_unmap(priv);
982 		else
983 			iwl_tx_queue_unmap(priv, txq_id);
984 }
985 
986 /*
987  * Find first available (lowest unused) Tx Queue, mark it "active".
988  * Called only when finding queue for aggregation.
989  * Should never return anything < 7, because they should already
990  * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
991  */
992 static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
993 {
994 	int txq_id;
995 
996 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
997 		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
998 			return txq_id;
999 	return -1;
1000 }
1001 
1002 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1003 			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
1004 {
1005 	int sta_id;
1006 	int tx_fifo;
1007 	int txq_id;
1008 	int ret;
1009 	unsigned long flags;
1010 	struct iwl_tid_data *tid_data;
1011 
1012 	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
1013 	if (unlikely(tx_fifo < 0))
1014 		return tx_fifo;
1015 
1016 	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
1017 			__func__, sta->addr, tid);
1018 
1019 	sta_id = iwl_sta_id(sta);
1020 	if (sta_id == IWL_INVALID_STATION) {
1021 		IWL_ERR(priv, "Start AGG on invalid station\n");
1022 		return -ENXIO;
1023 	}
1024 	if (unlikely(tid >= MAX_TID_COUNT))
1025 		return -EINVAL;
1026 
1027 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1028 		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
1029 		return -ENXIO;
1030 	}
1031 
1032 	txq_id = iwlagn_txq_ctx_activate_free(priv);
1033 	if (txq_id == -1) {
1034 		IWL_ERR(priv, "No free aggregation queue available\n");
1035 		return -ENXIO;
1036 	}
1037 
1038 	spin_lock_irqsave(&priv->sta_lock, flags);
1039 	tid_data = &priv->stations[sta_id].tid[tid];
1040 	*ssn = SEQ_TO_SN(tid_data->seq_number);
1041 	tid_data->agg.txq_id = txq_id;
1042 	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
1043 	spin_unlock_irqrestore(&priv->sta_lock, flags);
1044 
1045 	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1046 						  sta_id, tid, *ssn);
1047 	if (ret)
1048 		return ret;
1049 
1050 	spin_lock_irqsave(&priv->sta_lock, flags);
1051 	tid_data = &priv->stations[sta_id].tid[tid];
1052 	if (tid_data->tfds_in_queue == 0) {
1053 		IWL_DEBUG_HT(priv, "HW queue is empty\n");
1054 		tid_data->agg.state = IWL_AGG_ON;
1055 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1056 	} else {
1057 		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1058 			     tid_data->tfds_in_queue);
1059 		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1060 	}
1061 	spin_unlock_irqrestore(&priv->sta_lock, flags);
1062 	return ret;
1063 }
1064 
1065 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1066 		       struct ieee80211_sta *sta, u16 tid)
1067 {
1068 	int tx_fifo_id, txq_id, sta_id, ssn;
1069 	struct iwl_tid_data *tid_data;
1070 	int write_ptr, read_ptr;
1071 	unsigned long flags;
1072 
1073 	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
1074 	if (unlikely(tx_fifo_id < 0))
1075 		return tx_fifo_id;
1076 
1077 	sta_id = iwl_sta_id(sta);
1078 
1079 	if (sta_id == IWL_INVALID_STATION) {
1080 		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1081 		return -ENXIO;
1082 	}
1083 
1084 	spin_lock_irqsave(&priv->sta_lock, flags);
1085 
1086 	tid_data = &priv->stations[sta_id].tid[tid];
1087 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1088 	txq_id = tid_data->agg.txq_id;
1089 
1090 	switch (priv->stations[sta_id].tid[tid].agg.state) {
1091 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1092 		/*
1093 		 * This can happen if the peer stops aggregation
1094 		 * again before we've had a chance to drain the
1095 		 * queue we selected previously, i.e. before the
1096 		 * session was really started completely.
1097 		 */
1098 		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1099 		goto turn_off;
1100 	case IWL_AGG_ON:
1101 		break;
1102 	default:
1103 		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1104 	}
1105 
1106 	write_ptr = priv->txq[txq_id].q.write_ptr;
1107 	read_ptr = priv->txq[txq_id].q.read_ptr;
1108 
1109 	/* The queue is not empty */
1110 	if (write_ptr != read_ptr) {
1111 		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1112 		priv->stations[sta_id].tid[tid].agg.state =
1113 				IWL_EMPTYING_HW_QUEUE_DELBA;
1114 		spin_unlock_irqrestore(&priv->sta_lock, flags);
1115 		return 0;
1116 	}
1117 
1118 	IWL_DEBUG_HT(priv, "HW queue is empty\n");
1119  turn_off:
1120 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1121 
1122 	/* do not restore/save irqs */
1123 	spin_unlock(&priv->sta_lock);
1124 	spin_lock(&priv->lock);
1125 
1126 	/*
1127 	 * The only reason this call can fail is a queue number out of range,
1128 	 * which can happen if the uCode is reloaded and all the station
1129 	 * information is lost. If it is outside the range, there is no need
1130 	 * to deactivate the uCode queue; just return "success" to allow
1131 	 * mac80211 to clean up its own data.
1132 	 */
1133 	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1134 						   tx_fifo_id);
1135 	spin_unlock_irqrestore(&priv->lock, flags);
1136 
1137 	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1138 
1139 	return 0;
1140 }
1141 
1142 int iwlagn_txq_check_empty(struct iwl_priv *priv,
1143 			   int sta_id, u8 tid, int txq_id)
1144 {
1145 	struct iwl_queue *q = &priv->txq[txq_id].q;
1146 	u8 *addr = priv->stations[sta_id].sta.sta.addr;
1147 	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1148 	struct iwl_rxon_context *ctx;
1149 
1150 	ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1151 
1152 	lockdep_assert_held(&priv->sta_lock);
1153 
1154 	switch (priv->stations[sta_id].tid[tid].agg.state) {
1155 	case IWL_EMPTYING_HW_QUEUE_DELBA:
1156 		/* We are reclaiming the last packet of the */
1157 		/* aggregated HW queue */
1158 		if ((txq_id  == tid_data->agg.txq_id) &&
1159 		    (q->read_ptr == q->write_ptr)) {
1160 			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1161 			int tx_fifo = get_fifo_from_tid(ctx, tid);
1162 			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1163 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1164 							     ssn, tx_fifo);
1165 			tid_data->agg.state = IWL_AGG_OFF;
1166 			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1167 		}
1168 		break;
1169 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1170 		/* We are reclaiming the last packet of the queue */
1171 		if (tid_data->tfds_in_queue == 0) {
1172 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1173 			tid_data->agg.state = IWL_AGG_ON;
1174 			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1175 		}
1176 		break;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
1182 static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
1183 				     struct iwl_rxon_context *ctx,
1184 				     const u8 *addr1)
1185 {
1186 	struct ieee80211_sta *sta;
1187 	struct iwl_station_priv *sta_priv;
1188 
1189 	rcu_read_lock();
1190 	sta = ieee80211_find_sta(ctx->vif, addr1);
1191 	if (sta) {
1192 		sta_priv = (void *)sta->drv_priv;
1193 		/* avoid atomic ops if this isn't a client */
1194 		if (sta_priv->client &&
1195 		    atomic_dec_return(&sta_priv->pending_frames) == 0)
1196 			ieee80211_sta_block_awake(priv->hw, sta, false);
1197 	}
1198 	rcu_read_unlock();
1199 }
1200 
1201 static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1202 			     bool is_agg)
1203 {
1204 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1205 
1206 	if (!is_agg)
1207 		iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1208 
1209 	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1210 }
1211 
1212 int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1213 {
1214 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
1215 	struct iwl_queue *q = &txq->q;
1216 	struct iwl_tx_info *tx_info;
1217 	int nfreed = 0;
1218 	struct ieee80211_hdr *hdr;
1219 
1220 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1221 		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1222 			  "is out of range [0-%d] %d %d.\n", txq_id,
1223 			  index, q->n_bd, q->write_ptr, q->read_ptr);
1224 		return 0;
1225 	}
1226 
1227 	for (index = iwl_queue_inc_wrap(index, q->n_bd);
1228 	     q->read_ptr != index;
1229 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1230 
1231 		tx_info = &txq->txb[txq->q.read_ptr];
1232 
1233 		if (WARN_ON_ONCE(tx_info->skb == NULL))
1234 			continue;
1235 
1236 		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1237 		if (ieee80211_is_data_qos(hdr->frame_control))
1238 			nfreed++;
1239 
1240 		iwlagn_tx_status(priv, tx_info,
1241 				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1242 		tx_info->skb = NULL;
1243 
1244 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1245 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1246 
1247 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1248 	}
1249 	return nfreed;
1250 }
1251 
1252 /**
1253  * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
1254  *
1255  * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1256  * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
1257  */
1258 static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1259 				 struct iwl_ht_agg *agg,
1260 				 struct iwl_compressed_ba_resp *ba_resp)
1261 
1262 {
1263 	int i, sh, ack;
1264 	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1265 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1266 	int successes = 0;
1267 	struct ieee80211_tx_info *info;
1268 
1269 	if (unlikely(!agg->wait_for_ba))  {
1270 		if (unlikely(ba_resp->bitmap))
1271 			IWL_ERR(priv, "Received BA when not expected\n");
1272 		return -EINVAL;
1273 	}
1274 
1275 	/* Mark that the expected block-ack response arrived */
1276 	agg->wait_for_ba = 0;
1277 	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1278 
1279 	/* Calculate shift to align block-ack bits with our Tx window bits */
1280 	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1281 	if (sh < 0) /* tbw something is wrong with indices */
1282 		sh += 0x100;
1283 
1284 	if (agg->frame_count > (64 - sh)) {
1285 		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1286 		return -1;
1287 	}
1288 	if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1289 		/*
1290 		 * sent and ack information provided by uCode;
1291 		 * use it instead of figuring it out ourselves
1292 		 */
1293 		if (ba_resp->txed_2_done > ba_resp->txed) {
1294 			IWL_DEBUG_TX_REPLY(priv,
1295 				"bogus sent(%d) and ack(%d) count\n",
1296 				ba_resp->txed, ba_resp->txed_2_done);
1297 			/*
1298 			 * set txed_2_done = txed,
1299 			 * so it won't impact rate scale
1300 			 */
1301 			ba_resp->txed = ba_resp->txed_2_done;
1302 		}
1303 		IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
1304 				ba_resp->txed, ba_resp->txed_2_done);
1305 	} else {
1306 		u64 bitmap, sent_bitmap;
1307 
1308 		/* don't use 64-bit values for now */
1309 		bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1310 
1311 		/* check for success or failure according to the
1312 		 * transmitted bitmap and block-ack bitmap */
1313 		sent_bitmap = bitmap & agg->bitmap;
1314 
1315 		/* For each frame attempted in aggregation,
1316 		 * update driver's record of tx frame's status. */
1317 		i = 0;
1318 		while (sent_bitmap) {
1319 			ack = sent_bitmap & 1ULL;
1320 			successes += ack;
1321 			IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1322 				ack ? "ACK" : "NACK", i,
1323 				(agg->start_idx + i) & 0xff,
1324 				agg->start_idx + i);
1325 			sent_bitmap >>= 1;
1326 			++i;
1327 		}
1328 
1329 		IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1330 				   (unsigned long long)bitmap);
1331 	}
1332 
1333 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1334 	memset(&info->status, 0, sizeof(info->status));
1335 	info->flags |= IEEE80211_TX_STAT_ACK;
1336 	info->flags |= IEEE80211_TX_STAT_AMPDU;
1337 	if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1338 		info->status.ampdu_ack_len = ba_resp->txed_2_done;
1339 		info->status.ampdu_len = ba_resp->txed;
1340 
1341 	} else {
1342 		info->status.ampdu_ack_len = successes;
1343 		info->status.ampdu_len = agg->frame_count;
1344 	}
1345 	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1346 
1347 	return 0;
1348 }
1349 
1350 /**
1351  * translate ucode response to mac80211 tx status control values
1352  */
1353 void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1354 				  struct ieee80211_tx_info *info)
1355 {
1356 	struct ieee80211_tx_rate *r = &info->control.rates[0];
1357 
1358 	info->antenna_sel_tx =
1359 		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1360 	if (rate_n_flags & RATE_MCS_HT_MSK)
1361 		r->flags |= IEEE80211_TX_RC_MCS;
1362 	if (rate_n_flags & RATE_MCS_GF_MSK)
1363 		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1364 	if (rate_n_flags & RATE_MCS_HT40_MSK)
1365 		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1366 	if (rate_n_flags & RATE_MCS_DUP_MSK)
1367 		r->flags |= IEEE80211_TX_RC_DUP_DATA;
1368 	if (rate_n_flags & RATE_MCS_SGI_MSK)
1369 		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1370 	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1371 }
1372 
1373 /**
1374  * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1375  *
1376  * Handles block-acknowledge notification from device, which reports success
1377  * of frames sent via aggregation.
1378  */
1379 void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1380 					   struct iwl_rx_mem_buffer *rxb)
1381 {
1382 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1383 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1384 	struct iwl_tx_queue *txq = NULL;
1385 	struct iwl_ht_agg *agg;
1386 	int index;
1387 	int sta_id;
1388 	int tid;
1389 	unsigned long flags;
1390 
1391 	/* "flow" corresponds to Tx queue */
1392 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1393 
1394 	/* "ssn" is start of block-ack Tx window, corresponds to index
1395 	 * (in Tx queue's circular buffer) of first TFD/frame in window */
1396 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1397 
1398 	if (scd_flow >= priv->hw_params.max_txq_num) {
1399 		IWL_ERR(priv,
1400 			"BUG_ON scd_flow is bigger than number of queues\n");
1401 		return;
1402 	}
1403 
1404 	txq = &priv->txq[scd_flow];
1405 	sta_id = ba_resp->sta_id;
1406 	tid = ba_resp->tid;
1407 	agg = &priv->stations[sta_id].tid[tid].agg;
1408 	if (unlikely(agg->txq_id != scd_flow)) {
1409 		/*
1410 		 * FIXME: this is a uCode bug which needs to be addressed;
1411 		 * log the information and return for now.
1412 		 * Since it may happen very often, and in order
1413 		 * not to fill the syslog, don't enable the logging by default.
1414 		 */
1415 		IWL_DEBUG_TX_REPLY(priv,
1416 			"BA scd_flow %d does not match txq_id %d\n",
1417 			scd_flow, agg->txq_id);
1418 		return;
1419 	}
1420 
1421 	/* Find index just before block-ack window */
1422 	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1423 
1424 	spin_lock_irqsave(&priv->sta_lock, flags);
1425 
1426 	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1427 			   "sta_id = %d\n",
1428 			   agg->wait_for_ba,
1429 			   (u8 *) &ba_resp->sta_addr_lo32,
1430 			   ba_resp->sta_id);
1431 	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1432 			   "%d, scd_ssn = %d\n",
1433 			   ba_resp->tid,
1434 			   ba_resp->seq_ctl,
1435 			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1436 			   ba_resp->scd_flow,
1437 			   ba_resp->scd_ssn);
1438 	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1439 			   agg->start_idx,
1440 			   (unsigned long long)agg->bitmap);
1441 
1442 	/* Update driver's record of ACK vs. not for each frame in window */
1443 	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1444 
1445 	/* Release all TFDs before the SSN, i.e. all TFDs in front of
1446 	 * block-ack window (we assume that they've been successfully
1447 	 * transmitted ... if not, it's too late anyway). */
1448 	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1449 		/* calculate mac80211 ampdu sw queue to wake */
1450 		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
1451 		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1452 
1453 		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1454 		    priv->mac80211_registered &&
1455 		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1456 			iwl_wake_queue(priv, txq);
1457 
1458 		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1459 	}
1460 
1461 	spin_unlock_irqrestore(&priv->sta_lock, flags);
1462 }
1463 
1464 #ifdef CONFIG_IWLWIFI_DEBUG
1465 const char *iwl_get_tx_fail_reason(u32 status)
1466 {
1467 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1468 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1469 
1470 	switch (status & TX_STATUS_MSK) {
1471 	case TX_STATUS_SUCCESS:
1472 		return "SUCCESS";
1473 	TX_STATUS_POSTPONE(DELAY);
1474 	TX_STATUS_POSTPONE(FEW_BYTES);
1475 	TX_STATUS_POSTPONE(BT_PRIO);
1476 	TX_STATUS_POSTPONE(QUIET_PERIOD);
1477 	TX_STATUS_POSTPONE(CALC_TTAK);
1478 	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1479 	TX_STATUS_FAIL(SHORT_LIMIT);
1480 	TX_STATUS_FAIL(LONG_LIMIT);
1481 	TX_STATUS_FAIL(FIFO_UNDERRUN);
1482 	TX_STATUS_FAIL(DRAIN_FLOW);
1483 	TX_STATUS_FAIL(RFKILL_FLUSH);
1484 	TX_STATUS_FAIL(LIFE_EXPIRE);
1485 	TX_STATUS_FAIL(DEST_PS);
1486 	TX_STATUS_FAIL(HOST_ABORTED);
1487 	TX_STATUS_FAIL(BT_RETRY);
1488 	TX_STATUS_FAIL(STA_INVALID);
1489 	TX_STATUS_FAIL(FRAG_DROPPED);
1490 	TX_STATUS_FAIL(TID_DISABLE);
1491 	TX_STATUS_FAIL(FIFO_FLUSHED);
1492 	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
1493 	TX_STATUS_FAIL(PASSIVE_NO_RX);
1494 	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
1495 	}
1496 
1497 	return "UNKNOWN";
1498 
1499 #undef TX_STATUS_FAIL
1500 #undef TX_STATUS_POSTPONE
1501 }
1502 #endif /* CONFIG_IWLWIFI_DEBUG */
1503