1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/dma-mapping.h>
18 #include "ath9k.h"
19 #include "ar9003_mac.h"
20 
21 #define BITS_PER_BYTE           8
22 #define OFDM_PLCP_BITS          22
23 #define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
24 #define L_STF                   8
25 #define L_LTF                   8
26 #define L_SIG                   4
27 #define HT_SIG                  8
28 #define HT_STF                  4
29 #define HT_LTF(_ns)             (4 * (_ns))
30 #define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
31 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
32 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34 
35 
36 static u16 bits_per_symbol[][2] = {
37 	/* 20MHz 40MHz */
38 	{    26,   54 },     /*  0: BPSK */
39 	{    52,  108 },     /*  1: QPSK 1/2 */
40 	{    78,  162 },     /*  2: QPSK 3/4 */
41 	{   104,  216 },     /*  3: 16-QAM 1/2 */
42 	{   156,  324 },     /*  4: 16-QAM 3/4 */
43 	{   208,  432 },     /*  5: 64-QAM 2/3 */
44 	{   234,  486 },     /*  6: 64-QAM 3/4 */
45 	{   260,  540 },     /*  7: 64-QAM 5/6 */
46 };
47 
48 #define IS_HT_RATE(_rate)     ((_rate) & 0x80)
49 
50 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 			       struct ath_atx_tid *tid, struct sk_buff *skb);
52 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
53 			    int tx_flags, struct ath_txq *txq);
54 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 				struct ath_txq *txq, struct list_head *bf_q,
56 				struct ath_tx_status *ts, int txok);
57 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 			     struct list_head *head, bool internal);
59 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
60 			     struct ath_tx_status *ts, int nframes, int nbad,
61 			     int txok);
62 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 			      int seqno);
64 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
65 					   struct ath_txq *txq,
66 					   struct ath_atx_tid *tid,
67 					   struct sk_buff *skb);
68 
69 enum {
70 	MCS_HT20,
71 	MCS_HT20_SGI,
72 	MCS_HT40,
73 	MCS_HT40_SGI,
74 };
75 
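/*
 * Per-MCS maximum frame length (in bytes, capped at 65532) that fits in
 * roughly a 4 ms transmit duration, for HT20/HT40 with normal and short GI.
 * Used to limit the size of A-MPDU aggregates (see ath_lookup_rate()).
 */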
76 static int ath_max_4ms_framelen[4][32] = {
77 	[MCS_HT20] = {
78 		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
79 		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
80 		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
81 		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
82 	},
83 	[MCS_HT20_SGI] = {
84 		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
85 		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
86 		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
87 		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
88 	},
89 	[MCS_HT40] = {
90 		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
91 		13348, 26700,  40052,  53400,  65532,  65532,  65532,  65532,
92 		20004, 40008,  60016,  65532,  65532,  65532,  65532,  65532,
93 		26644, 53292,  65532,  65532,  65532,  65532,  65532,  65532,
94 	},
95 	[MCS_HT40_SGI] = {
96 		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
97 		14832, 29668,  44504,  59340,  65532,  65532,  65532,  65532,
98 		22232, 44464,  65532,  65532,  65532,  65532,  65532,  65532,
99 		29616, 59232,  65532,  65532,  65532,  65532,  65532,  65532,
100 	}
101 };
102 
103 /*********************/
104 /* Aggregation logic */
105 /*********************/
106 
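/*
 * TX queue lock helpers.  ath_txq_unlock_complete() additionally hands any
 * frames queued on txq->complete_q to mac80211 after dropping the lock, so
 * completions never run under axq_lock.  Typical usage (sketch):
 *
 *	ath_txq_lock(sc, txq);
 *	... modify txq / tid state ...
 *	ath_txq_unlock_complete(sc, txq);
 */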
107 static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
108 	__acquires(&txq->axq_lock)
109 {
110 	spin_lock_bh(&txq->axq_lock);
111 }
112 
113 static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
114 	__releases(&txq->axq_lock)
115 {
116 	spin_unlock_bh(&txq->axq_lock);
117 }
118 
119 static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
120 	__releases(&txq->axq_lock)
121 {
122 	struct sk_buff_head q;
123 	struct sk_buff *skb;
124 
125 	__skb_queue_head_init(&q);
126 	skb_queue_splice_init(&txq->complete_q, &q);
127 	spin_unlock_bh(&txq->axq_lock);
128 
129 	while ((skb = __skb_dequeue(&q)))
130 		ieee80211_tx_status(sc->hw, skb);
131 }
132 
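/*
 * Add a TID to its access category's scheduling list (and the AC to the
 * hardware queue's list) unless the TID is paused or already scheduled.
 */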
133 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
134 {
135 	struct ath_atx_ac *ac = tid->ac;
136 
137 	if (tid->paused)
138 		return;
139 
140 	if (tid->sched)
141 		return;
142 
143 	tid->sched = true;
144 	list_add_tail(&tid->list, &ac->tid_q);
145 
146 	if (ac->sched)
147 		return;
148 
149 	ac->sched = true;
150 	list_add_tail(&ac->list, &txq->axq_acq);
151 }
152 
153 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
154 {
155 	struct ath_txq *txq = tid->ac->txq;
156 
157 	WARN_ON(!tid->paused);
158 
159 	ath_txq_lock(sc, txq);
160 	tid->paused = false;
161 
162 	if (skb_queue_empty(&tid->buf_q))
163 		goto unlock;
164 
165 	ath_tx_queue_tid(txq, tid);
166 	ath_txq_schedule(sc, txq);
167 unlock:
168 	ath_txq_unlock_complete(sc, txq);
169 }
170 
171 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
172 {
173 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
174 	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
175 		     sizeof(tx_info->rate_driver_data));
176 	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
177 }
178 
179 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
180 {
181 	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
182 			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
183 }
184 
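/*
 * Drain the software queue of a TID: frames that have already been retried
 * are completed as failed and removed from the block-ack window (followed by
 * a BAR), while the remaining frames are sent out as normal, non-aggregated
 * frames.
 */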
185 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
186 {
187 	struct ath_txq *txq = tid->ac->txq;
188 	struct sk_buff *skb;
189 	struct ath_buf *bf;
190 	struct list_head bf_head;
191 	struct ath_tx_status ts;
192 	struct ath_frame_info *fi;
193 	bool sendbar = false;
194 
195 	INIT_LIST_HEAD(&bf_head);
196 
197 	memset(&ts, 0, sizeof(ts));
198 
199 	while ((skb = __skb_dequeue(&tid->buf_q))) {
200 		fi = get_frame_info(skb);
201 		bf = fi->bf;
202 
203 		if (!bf) {
204 			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
205 			if (!bf) {
206 				ieee80211_free_txskb(sc->hw, skb);
207 				continue;
208 			}
209 		}
210 
211 		if (fi->retries) {
212 			list_add_tail(&bf->list, &bf_head);
213 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
214 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
215 			sendbar = true;
216 		} else {
217 			ath_tx_send_normal(sc, txq, NULL, skb);
218 		}
219 	}
220 
221 	if (tid->baw_head == tid->baw_tail) {
222 		tid->state &= ~AGGR_ADDBA_COMPLETE;
223 		tid->state &= ~AGGR_CLEANUP;
224 	}
225 
226 	if (sendbar) {
227 		ath_txq_unlock(sc, txq);
228 		ath_send_bar(tid, tid->seq_start);
229 		ath_txq_lock(sc, txq);
230 	}
231 }
232 
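/*
 * Mark the given sequence number as completed in the block-ack window and
 * slide the window start forward past any consecutively completed frames.
 */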
233 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
234 			      int seqno)
235 {
236 	int index, cindex;
237 
238 	index  = ATH_BA_INDEX(tid->seq_start, seqno);
239 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
240 
241 	__clear_bit(cindex, tid->tx_buf);
242 
243 	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
244 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
245 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
246 		if (tid->bar_index >= 0)
247 			tid->bar_index--;
248 	}
249 }
250 
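/*
 * Mark the given sequence number as outstanding in the block-ack window,
 * extending the window tail if necessary.
 */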
251 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
252 			     u16 seqno)
253 {
254 	int index, cindex;
255 
256 	index  = ATH_BA_INDEX(tid->seq_start, seqno);
257 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
258 	__set_bit(cindex, tid->tx_buf);
259 
260 	if (index >= ((tid->baw_tail - tid->baw_head) &
261 		(ATH_TID_MAX_BUFS - 1))) {
262 		tid->baw_tail = cindex;
263 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
264 	}
265 }
266 
267 /*
268  * TODO: For frame(s) that are in the retry state, we will reuse the
269  * sequence number(s) without setting the retry bit. The
270  * alternative is to give up on these and BAR the receiver's window
271  * forward.
272  */
273 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
274 			  struct ath_atx_tid *tid)
275 
276 {
277 	struct sk_buff *skb;
278 	struct ath_buf *bf;
279 	struct list_head bf_head;
280 	struct ath_tx_status ts;
281 	struct ath_frame_info *fi;
282 
283 	memset(&ts, 0, sizeof(ts));
284 	INIT_LIST_HEAD(&bf_head);
285 
286 	while ((skb = __skb_dequeue(&tid->buf_q))) {
287 		fi = get_frame_info(skb);
288 		bf = fi->bf;
289 
290 		if (!bf) {
291 			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
292 			continue;
293 		}
294 
295 		list_add_tail(&bf->list, &bf_head);
296 
297 		if (fi->retries)
298 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
299 
300 		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
301 	}
302 
303 	tid->seq_next = tid->seq_start;
304 	tid->baw_tail = tid->baw_head;
305 	tid->bar_index = -1;
306 }
307 
308 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
309 			     struct sk_buff *skb, int count)
310 {
311 	struct ath_frame_info *fi = get_frame_info(skb);
312 	struct ath_buf *bf = fi->bf;
313 	struct ieee80211_hdr *hdr;
314 	int prev = fi->retries;
315 
316 	TX_STAT_INC(txq->axq_qnum, a_retries);
317 	fi->retries += count;
318 
319 	if (prev > 0)
320 		return;
321 
322 	hdr = (struct ieee80211_hdr *)skb->data;
323 	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
324 	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
325 		sizeof(*hdr), DMA_TO_DEVICE);
326 }
327 
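/* Grab a free ath_buf from the common tx buffer list, or NULL if none left. */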
328 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
329 {
330 	struct ath_buf *bf = NULL;
331 
332 	spin_lock_bh(&sc->tx.txbuflock);
333 
334 	if (unlikely(list_empty(&sc->tx.txbuf))) {
335 		spin_unlock_bh(&sc->tx.txbuflock);
336 		return NULL;
337 	}
338 
339 	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
340 	bf->bf_next = NULL;
341 	list_del(&bf->list);
342 
343 	spin_unlock_bh(&sc->tx.txbuflock);
344 
345 	return bf;
346 }
347 
348 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
349 {
350 	spin_lock_bh(&sc->tx.txbuflock);
351 	list_add_tail(&bf->list, &sc->tx.txbuf);
352 	spin_unlock_bh(&sc->tx.txbuflock);
353 }
354 
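/*
 * Clone an ath_buf so that a frame whose original descriptor is a stale
 * holding descriptor can still be retransmitted.  Returns NULL (with a
 * warning) if no spare buffers are available.
 */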
355 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
356 {
357 	struct ath_buf *tbf;
358 
359 	tbf = ath_tx_get_buffer(sc);
360 	if (WARN_ON(!tbf))
361 		return NULL;
362 
363 	ATH_TXBUF_RESET(tbf);
364 
365 	tbf->bf_mpdu = bf->bf_mpdu;
366 	tbf->bf_buf_addr = bf->bf_buf_addr;
367 	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
368 	tbf->bf_state = bf->bf_state;
369 
370 	return tbf;
371 }
372 
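/*
 * Walk the subframes of an aggregate and count the total number of frames
 * and the number that were not acknowledged in the block-ack bitmap.
 */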
373 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
374 			        struct ath_tx_status *ts, int txok,
375 			        int *nframes, int *nbad)
376 {
377 	struct ath_frame_info *fi;
378 	u16 seq_st = 0;
379 	u32 ba[WME_BA_BMP_SIZE >> 5];
380 	int ba_index;
381 	int isaggr = 0;
382 
383 	*nbad = 0;
384 	*nframes = 0;
385 
386 	isaggr = bf_isaggr(bf);
387 	if (isaggr) {
388 		seq_st = ts->ts_seqnum;
389 		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
390 	}
391 
392 	while (bf) {
393 		fi = get_frame_info(bf->bf_mpdu);
394 		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
395 
396 		(*nframes)++;
397 		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
398 			(*nbad)++;
399 
400 		bf = bf->bf_next;
401 	}
402 }
403 
404 
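/*
 * Handle completion of an A-MPDU: classify each subframe as acked, failed or
 * pending, update the block-ack window, re-queue pending subframes for
 * retransmission and, if needed, send a BAR to move the receiver's window.
 */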
405 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
406 				 struct ath_buf *bf, struct list_head *bf_q,
407 				 struct ath_tx_status *ts, int txok, bool retry)
408 {
409 	struct ath_node *an = NULL;
410 	struct sk_buff *skb;
411 	struct ieee80211_sta *sta;
412 	struct ieee80211_hw *hw = sc->hw;
413 	struct ieee80211_hdr *hdr;
414 	struct ieee80211_tx_info *tx_info;
415 	struct ath_atx_tid *tid = NULL;
416 	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
417 	struct list_head bf_head;
418 	struct sk_buff_head bf_pending;
419 	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
420 	u32 ba[WME_BA_BMP_SIZE >> 5];
421 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
422 	bool rc_update = true, isba;
423 	struct ieee80211_tx_rate rates[4];
424 	struct ath_frame_info *fi;
425 	int nframes;
426 	u8 tidno;
427 	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
428 	int i, retries;
429 	int bar_index = -1;
430 
431 	skb = bf->bf_mpdu;
432 	hdr = (struct ieee80211_hdr *)skb->data;
433 
434 	tx_info = IEEE80211_SKB_CB(skb);
435 
436 	memcpy(rates, tx_info->control.rates, sizeof(rates));
437 
438 	retries = ts->ts_longretry + 1;
439 	for (i = 0; i < ts->ts_rateindex; i++)
440 		retries += rates[i].count;
441 
442 	rcu_read_lock();
443 
444 	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
445 	if (!sta) {
446 		rcu_read_unlock();
447 
448 		INIT_LIST_HEAD(&bf_head);
449 		while (bf) {
450 			bf_next = bf->bf_next;
451 
452 			if (!bf->bf_stale || bf_next != NULL)
453 				list_move_tail(&bf->list, &bf_head);
454 
455 			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
456 
457 			bf = bf_next;
458 		}
459 		return;
460 	}
461 
462 	an = (struct ath_node *)sta->drv_priv;
463 	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
464 	tid = ATH_AN_2_TID(an, tidno);
465 	seq_first = tid->seq_start;
466 	isba = ts->ts_flags & ATH9K_TX_BA;
467 
468 	/*
469 	 * The hardware occasionally sends a tx status for the wrong TID.
470 	 * In this case, the BA status cannot be considered valid and all
471 	 * subframes need to be retransmitted.
472 	 *
473 	 * Only BlockAcks have a TID and therefore normal Acks cannot be
474 	 * checked.
475 	 */
476 	if (isba && tidno != ts->tid)
477 		txok = false;
478 
479 	isaggr = bf_isaggr(bf);
480 	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
481 
482 	if (isaggr && txok) {
483 		if (ts->ts_flags & ATH9K_TX_BA) {
484 			seq_st = ts->ts_seqnum;
485 			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
486 		} else {
487 			/*
488 			 * The AR5416 can become deaf/mute when a BA
489 			 * issue happens. The chip needs to be reset,
490 			 * but the AP code may have synchronization issues
491 			 * when performing an internal reset in this routine.
492 			 * Only enable reset in STA mode for now.
493 			 */
494 			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
495 				needreset = 1;
496 		}
497 	}
498 
499 	__skb_queue_head_init(&bf_pending);
500 
501 	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
502 	while (bf) {
503 		u16 seqno = bf->bf_state.seqno;
504 
505 		txfail = txpending = sendbar = 0;
506 		bf_next = bf->bf_next;
507 
508 		skb = bf->bf_mpdu;
509 		tx_info = IEEE80211_SKB_CB(skb);
510 		fi = get_frame_info(skb);
511 
512 		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
513 			/* transmit completion, subframe is
514 			 * acked by block ack */
515 			acked_cnt++;
516 		} else if (!isaggr && txok) {
517 			/* transmit completion */
518 			acked_cnt++;
519 		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
520 			/*
521 			 * cleanup in progress, just fail
522 			 * the un-acked sub-frames
523 			 */
524 			txfail = 1;
525 		} else if (flush) {
526 			txpending = 1;
527 		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
528 			if (txok || !an->sleeping)
529 				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
530 						 retries);
531 
532 			txpending = 1;
533 		} else {
534 			txfail = 1;
535 			txfail_cnt++;
536 			bar_index = max_t(int, bar_index,
537 				ATH_BA_INDEX(seq_first, seqno));
538 		}
539 
540 		/*
541 		 * Make sure the last desc is reclaimed if it is
542 		 * not a holding desc.
543 		 */
544 		INIT_LIST_HEAD(&bf_head);
545 		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
546 		    bf_next != NULL || !bf_last->bf_stale)
547 			list_move_tail(&bf->list, &bf_head);
548 
549 		if (!txpending || (tid->state & AGGR_CLEANUP)) {
550 			/*
551 			 * complete the acked-ones/xretried ones; update
552 			 * block-ack window
553 			 */
554 			ath_tx_update_baw(sc, tid, seqno);
555 
556 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
557 				memcpy(tx_info->control.rates, rates, sizeof(rates));
558 				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
559 				rc_update = false;
560 			}
561 
562 			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
563 				!txfail);
564 		} else {
565 			/* retry the un-acked ones */
566 			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
567 			    bf->bf_next == NULL && bf_last->bf_stale) {
568 				struct ath_buf *tbf;
569 
570 				tbf = ath_clone_txbuf(sc, bf_last);
571 				/*
572 				 * Update tx baw and complete the
573 				 * frame with failed status if we
574 				 * run out of tx buf.
575 				 */
576 				if (!tbf) {
577 					ath_tx_update_baw(sc, tid, seqno);
578 
579 					ath_tx_complete_buf(sc, bf, txq,
580 							    &bf_head, ts, 0);
581 					bar_index = max_t(int, bar_index,
582 						ATH_BA_INDEX(seq_first, seqno));
583 					break;
584 				}
585 
586 				fi->bf = tbf;
587 			}
588 
589 			/*
590 			 * Put this buffer to the temporary pending
591 			 * queue to retain ordering
592 			 */
593 			__skb_queue_tail(&bf_pending, skb);
594 		}
595 
596 		bf = bf_next;
597 	}
598 
599 	/* prepend un-acked frames to the beginning of the pending frame queue */
600 	if (!skb_queue_empty(&bf_pending)) {
601 		if (an->sleeping)
602 			ieee80211_sta_set_buffered(sta, tid->tidno, true);
603 
604 		skb_queue_splice(&bf_pending, &tid->buf_q);
605 		if (!an->sleeping) {
606 			ath_tx_queue_tid(txq, tid);
607 
608 			if (ts->ts_status & ATH9K_TXERR_FILT)
609 				tid->ac->clear_ps_filter = true;
610 		}
611 	}
612 
613 	if (bar_index >= 0) {
614 		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
615 
616 		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
617 			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
618 
619 		ath_txq_unlock(sc, txq);
620 		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
621 		ath_txq_lock(sc, txq);
622 	}
623 
624 	if (tid->state & AGGR_CLEANUP)
625 		ath_tx_flush_tid(sc, tid);
626 
627 	rcu_read_unlock();
628 
629 	if (needreset) {
630 		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
631 		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
632 	}
633 }
634 
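/* Return true if any valid entry in the rate series is a legacy (non-MCS) rate. */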
635 static bool ath_lookup_legacy(struct ath_buf *bf)
636 {
637 	struct sk_buff *skb;
638 	struct ieee80211_tx_info *tx_info;
639 	struct ieee80211_tx_rate *rates;
640 	int i;
641 
642 	skb = bf->bf_mpdu;
643 	tx_info = IEEE80211_SKB_CB(skb);
644 	rates = tx_info->control.rates;
645 
646 	for (i = 0; i < 4; i++) {
647 		if (!rates[i].count || rates[i].idx < 0)
648 			break;
649 
650 		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
651 			return true;
652 	}
653 
654 	return false;
655 }
656 
657 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
658 			   struct ath_atx_tid *tid)
659 {
660 	struct sk_buff *skb;
661 	struct ieee80211_tx_info *tx_info;
662 	struct ieee80211_tx_rate *rates;
663 	u32 max_4ms_framelen, frmlen;
664 	u16 aggr_limit, bt_aggr_limit, legacy = 0;
665 	int i;
666 
667 	skb = bf->bf_mpdu;
668 	tx_info = IEEE80211_SKB_CB(skb);
669 	rates = tx_info->control.rates;
670 
671 	/*
672 	 * Find the lowest frame length among the rate series that will have a
673 	 * 4ms transmit duration.
674 	 * TODO - TXOP limit needs to be considered.
675 	 */
676 	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
677 
678 	for (i = 0; i < 4; i++) {
679 		int modeidx;
680 
681 		if (!rates[i].count)
682 			continue;
683 
684 		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
685 			legacy = 1;
686 			break;
687 		}
688 
689 		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
690 			modeidx = MCS_HT40;
691 		else
692 			modeidx = MCS_HT20;
693 
694 		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
695 			modeidx++;
696 
697 		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
698 		max_4ms_framelen = min(max_4ms_framelen, frmlen);
699 	}
700 
701 	/*
702 	 * Limit the aggregate size by the minimum rate if the selected
703 	 * rate is not a probe rate; if the selected rate is a probe
704 	 * rate, avoid aggregating this packet.
705 	 */
706 	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
707 		return 0;
708 
709 	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);
710 
711 	/*
712 	 * Override the default aggregation limit for BTCOEX.
713 	 */
714 	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
715 	if (bt_aggr_limit)
716 		aggr_limit = bt_aggr_limit;
717 
718 	/*
719 	 * The h/w can accept aggregates with up to 16-bit lengths (65535).
720 	 * The IE, however, can hold up to 65536, which shows up here
721 	 * as zero. Ignore 65536 since we are constrained by the hw.
722 	 */
723 	if (tid->an->maxampdu)
724 		aggr_limit = min(aggr_limit, tid->an->maxampdu);
725 
726 	return aggr_limit;
727 }
728 
729 /*
730  * Returns the number of delimiters to be added to
731  * meet the minimum required mpdudensity.
732  */
733 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
734 				  struct ath_buf *bf, u16 frmlen,
735 				  bool first_subfrm)
736 {
737 #define FIRST_DESC_NDELIMS 60
738 	struct sk_buff *skb = bf->bf_mpdu;
739 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
740 	u32 nsymbits, nsymbols;
741 	u16 minlen;
742 	u8 flags, rix;
743 	int width, streams, half_gi, ndelim, mindelim;
744 	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
745 
746 	/* Select standard number of delimiters based on frame length alone */
747 	ndelim = ATH_AGGR_GET_NDELIM(frmlen);
748 
749 	/*
750 	 * If encryption is enabled, the hardware requires some more padding
751 	 * between subframes.
752 	 * TODO - this could be improved to be dependent on the rate.
753 	 *      The hardware can keep up at lower rates, but not at higher rates.
754 	 */
755 	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
756 	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
757 		ndelim += ATH_AGGR_ENCRYPTDELIM;
758 
759 	/*
760 	 * Add delimiters when using RTS/CTS with aggregation
761 	 * and a non-enterprise AR9003 card.
762 	 */
763 	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
764 	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
765 		ndelim = max(ndelim, FIRST_DESC_NDELIMS);
766 
767 	/*
768 	 * Convert the desired mpdu density from microseconds to bytes based
769 	 * on the highest rate in the rate series (i.e. the first rate) to
770 	 * determine the required minimum length for a subframe. Take into
771 	 * account whether the highest rate is 20 or 40 MHz and half or full GI.
772 	 *
773 	 * If there is no mpdu density restriction, no further calculation
774 	 * is needed.
775 	 */
776 
777 	if (tid->an->mpdudensity == 0)
778 		return ndelim;
779 
780 	rix = tx_info->control.rates[0].idx;
781 	flags = tx_info->control.rates[0].flags;
782 	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
783 	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
784 
785 	if (half_gi)
786 		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
787 	else
788 		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
789 
790 	if (nsymbols == 0)
791 		nsymbols = 1;
792 
793 	streams = HT_RC_2_STREAMS(rix);
794 	nsymbits = bits_per_symbol[rix % 8][width] * streams;
795 	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
796 
797 	if (frmlen < minlen) {
798 		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
799 		ndelim = max(mindelim, ndelim);
800 	}
801 
802 	return ndelim;
803 }
804 
805 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
806 					     struct ath_txq *txq,
807 					     struct ath_atx_tid *tid,
808 					     struct list_head *bf_q,
809 					     int *aggr_len)
810 {
811 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
812 	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
813 	int rl = 0, nframes = 0, ndelim, prev_al = 0;
814 	u16 aggr_limit = 0, al = 0, bpad = 0,
815 		al_delta, h_baw = tid->baw_size / 2;
816 	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
817 	struct ieee80211_tx_info *tx_info;
818 	struct ath_frame_info *fi;
819 	struct sk_buff *skb;
820 	u16 seqno;
821 
822 	do {
823 		skb = skb_peek(&tid->buf_q);
824 		fi = get_frame_info(skb);
825 		bf = fi->bf;
826 		if (!fi->bf)
827 			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
828 
829 		if (!bf) {
830 			__skb_unlink(skb, &tid->buf_q);
831 			ieee80211_free_txskb(sc->hw, skb);
832 			continue;
833 		}
834 
835 		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
836 		seqno = bf->bf_state.seqno;
837 
838 		/* do not step over block-ack window */
839 		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
840 			status = ATH_AGGR_BAW_CLOSED;
841 			break;
842 		}
843 
844 		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
845 			struct ath_tx_status ts = {};
846 			struct list_head bf_head;
847 
848 			INIT_LIST_HEAD(&bf_head);
849 			list_add(&bf->list, &bf_head);
850 			__skb_unlink(skb, &tid->buf_q);
851 			ath_tx_update_baw(sc, tid, seqno);
852 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
853 			continue;
854 		}
855 
856 		if (!bf_first)
857 			bf_first = bf;
858 
859 		if (!rl) {
860 			aggr_limit = ath_lookup_rate(sc, bf, tid);
861 			rl = 1;
862 		}
863 
864 		/* do not exceed aggregation limit */
865 		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
866 
867 		if (nframes &&
868 		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
869 		     ath_lookup_legacy(bf))) {
870 			status = ATH_AGGR_LIMITED;
871 			break;
872 		}
873 
874 		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
875 		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
876 			break;
877 
878 		/* do not exceed subframe limit */
879 		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
880 			status = ATH_AGGR_LIMITED;
881 			break;
882 		}
883 
884 		/* add padding for previous frame to aggregation length */
885 		al += bpad + al_delta;
886 
887 		/*
888 		 * Get the delimiters needed to meet the MPDU
889 		 * density for this node.
890 		 */
891 		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
892 						!nframes);
893 		bpad = PADBYTES(al_delta) + (ndelim << 2);
894 
895 		nframes++;
896 		bf->bf_next = NULL;
897 
898 		/* link buffers of this frame to the aggregate */
899 		if (!fi->retries)
900 			ath_tx_addto_baw(sc, tid, seqno);
901 		bf->bf_state.ndelim = ndelim;
902 
903 		__skb_unlink(skb, &tid->buf_q);
904 		list_add_tail(&bf->list, bf_q);
905 		if (bf_prev)
906 			bf_prev->bf_next = bf;
907 
908 		bf_prev = bf;
909 
910 	} while (!skb_queue_empty(&tid->buf_q));
911 
912 	*aggr_len = al;
913 
914 	return status;
915 #undef PADBYTES
916 }
917 
918 /*
919  * rix - rate index
920  * pktlen - total bytes (delims + data + fcs + pads + pad delims)
921  * width  - 0 for 20 MHz, 1 for 40 MHz
922  * half_gi - to use 4us v/s 3.6 us for symbol time
923  */
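/*
 * Worked example (sketch): a single-stream MCS 7 frame (rix 7, 20 MHz,
 * full GI) of pktlen 1500 gives nsymbits = 260 and
 * nsymbols = (1500 * 8 + 22 + 260 - 1) / 260 = 47, so the data portion
 * lasts SYMBOL_TIME(47) = 188 us; the training/signal fields add
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us, for roughly 224 us in total.
 */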
924 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
925 			    int width, int half_gi, bool shortPreamble)
926 {
927 	u32 nbits, nsymbits, duration, nsymbols;
928 	int streams;
929 
930 	/* find number of symbols: PLCP + data */
931 	streams = HT_RC_2_STREAMS(rix);
932 	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
933 	nsymbits = bits_per_symbol[rix % 8][width] * streams;
934 	nsymbols = (nbits + nsymbits - 1) / nsymbits;
935 
936 	if (!half_gi)
937 		duration = SYMBOL_TIME(nsymbols);
938 	else
939 		duration = SYMBOL_TIME_HALFGI(nsymbols);
940 
941 	/* add up the duration for legacy/HT training and signal fields */
942 	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
943 
944 	return duration;
945 }
946 
947 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
948 			     struct ath_tx_info *info, int len)
949 {
950 	struct ath_hw *ah = sc->sc_ah;
951 	struct sk_buff *skb;
952 	struct ieee80211_tx_info *tx_info;
953 	struct ieee80211_tx_rate *rates;
954 	const struct ieee80211_rate *rate;
955 	struct ieee80211_hdr *hdr;
956 	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
957 	int i;
958 	u8 rix = 0;
959 
960 	skb = bf->bf_mpdu;
961 	tx_info = IEEE80211_SKB_CB(skb);
962 	rates = tx_info->control.rates;
963 	hdr = (struct ieee80211_hdr *)skb->data;
964 
965 	/* set dur_update_en for l-sig computation except for PS-Poll frames */
966 	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
967 	info->rtscts_rate = fi->rtscts_rate;
968 
969 	for (i = 0; i < 4; i++) {
970 		bool is_40, is_sgi, is_sp;
971 		int phy;
972 
973 		if (!rates[i].count || (rates[i].idx < 0))
974 			continue;
975 
976 		rix = rates[i].idx;
977 		info->rates[i].Tries = rates[i].count;
978 
979 		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
980 			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
981 			info->flags |= ATH9K_TXDESC_RTSENA;
982 		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
983 			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
984 			info->flags |= ATH9K_TXDESC_CTSENA;
985 		}
986 
987 		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
988 			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
989 		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
990 			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
991 
992 		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
993 		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
994 		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
995 
996 		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
997 			/* MCS rates */
998 			info->rates[i].Rate = rix | 0x80;
999 			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1000 					ah->txchainmask, info->rates[i].Rate);
1001 			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
1002 				 is_40, is_sgi, is_sp);
1003 			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1004 				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
1005 			continue;
1006 		}
1007 
1008 		/* legacy rates */
1009 		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1010 		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1011 		    !(rate->flags & IEEE80211_RATE_ERP_G))
1012 			phy = WLAN_RC_PHY_CCK;
1013 		else
1014 			phy = WLAN_RC_PHY_OFDM;
1015 
1016 		info->rates[i].Rate = rate->hw_value;
1017 		if (rate->hw_value_short) {
1018 			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1019 				info->rates[i].Rate |= rate->hw_value_short;
1020 		} else {
1021 			is_sp = false;
1022 		}
1023 
1024 		if (bf->bf_state.bfs_paprd)
1025 			info->rates[i].ChSel = ah->txchainmask;
1026 		else
1027 			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1028 					ah->txchainmask, info->rates[i].Rate);
1029 
1030 		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1031 			phy, rate->bitrate * 100, len, rix, is_sp);
1032 	}
1033 
1034 	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1035 	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1036 		info->flags &= ~ATH9K_TXDESC_RTSENA;
1037 
1038 	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1039 	if (info->flags & ATH9K_TXDESC_RTSENA)
1040 		info->flags &= ~ATH9K_TXDESC_CTSENA;
1041 }
1042 
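/*
 * Map the 802.11 frame type to the hardware packet type programmed into the
 * tx descriptor (beacon, probe response, ATIM, PS-Poll or normal data).
 */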
1043 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1044 {
1045 	struct ieee80211_hdr *hdr;
1046 	enum ath9k_pkt_type htype;
1047 	__le16 fc;
1048 
1049 	hdr = (struct ieee80211_hdr *)skb->data;
1050 	fc = hdr->frame_control;
1051 
1052 	if (ieee80211_is_beacon(fc))
1053 		htype = ATH9K_PKT_TYPE_BEACON;
1054 	else if (ieee80211_is_probe_resp(fc))
1055 		htype = ATH9K_PKT_TYPE_PROBE_RESP;
1056 	else if (ieee80211_is_atim(fc))
1057 		htype = ATH9K_PKT_TYPE_ATIM;
1058 	else if (ieee80211_is_pspoll(fc))
1059 		htype = ATH9K_PKT_TYPE_PSPOLL;
1060 	else
1061 		htype = ATH9K_PKT_TYPE_NORMAL;
1062 
1063 	return htype;
1064 }
1065 
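/*
 * Fill the hardware tx descriptor(s) for a buffer chain, linking each
 * descriptor to the next and tagging the first/middle/last subframes when
 * the chain is an aggregate.
 */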
1066 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1067 			     struct ath_txq *txq, int len)
1068 {
1069 	struct ath_hw *ah = sc->sc_ah;
1070 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1071 	struct ath_buf *bf_first = bf;
1072 	struct ath_tx_info info;
1073 	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
1074 
1075 	memset(&info, 0, sizeof(info));
1076 	info.is_first = true;
1077 	info.is_last = true;
1078 	info.txpower = MAX_RATE_POWER;
1079 	info.qcu = txq->axq_qnum;
1080 
1081 	info.flags = ATH9K_TXDESC_INTREQ;
1082 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1083 		info.flags |= ATH9K_TXDESC_NOACK;
1084 	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1085 		info.flags |= ATH9K_TXDESC_LDPC;
1086 
1087 	ath_buf_set_rate(sc, bf, &info, len);
1088 
1089 	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1090 		info.flags |= ATH9K_TXDESC_CLRDMASK;
1091 
1092 	if (bf->bf_state.bfs_paprd)
1093 		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
1094 
1095 
1096 	while (bf) {
1097 		struct sk_buff *skb = bf->bf_mpdu;
1098 		struct ath_frame_info *fi = get_frame_info(skb);
1099 
1100 		info.type = get_hw_packet_type(skb);
1101 		if (bf->bf_next)
1102 			info.link = bf->bf_next->bf_daddr;
1103 		else
1104 			info.link = 0;
1105 
1106 		info.buf_addr[0] = bf->bf_buf_addr;
1107 		info.buf_len[0] = skb->len;
1108 		info.pkt_len = fi->framelen;
1109 		info.keyix = fi->keyix;
1110 		info.keytype = fi->keytype;
1111 
1112 		if (aggr) {
1113 			if (bf == bf_first)
1114 				info.aggr = AGGR_BUF_FIRST;
1115 			else if (!bf->bf_next)
1116 				info.aggr = AGGR_BUF_LAST;
1117 			else
1118 				info.aggr = AGGR_BUF_MIDDLE;
1119 
1120 			info.ndelim = bf->bf_state.ndelim;
1121 			info.aggr_len = len;
1122 		}
1123 
1124 		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
1125 		bf = bf->bf_next;
1126 	}
1127 }
1128 
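/*
 * Form aggregates from the TID's software queue and hand them to the
 * hardware queue until it is sufficiently deep or the block-ack window
 * closes.  A single remaining frame is sent as a non-aggregate.
 */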
1129 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1130 			      struct ath_atx_tid *tid)
1131 {
1132 	struct ath_buf *bf;
1133 	enum ATH_AGGR_STATUS status;
1134 	struct ieee80211_tx_info *tx_info;
1135 	struct list_head bf_q;
1136 	int aggr_len;
1137 
1138 	do {
1139 		if (skb_queue_empty(&tid->buf_q))
1140 			return;
1141 
1142 		INIT_LIST_HEAD(&bf_q);
1143 
1144 		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
1145 
1146 		/*
1147 		 * no frames picked up to be aggregated;
1148 		 * block-ack window is not open.
1149 		 */
1150 		if (list_empty(&bf_q))
1151 			break;
1152 
1153 		bf = list_first_entry(&bf_q, struct ath_buf, list);
1154 		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
1155 		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1156 
1157 		if (tid->ac->clear_ps_filter) {
1158 			tid->ac->clear_ps_filter = false;
1159 			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1160 		} else {
1161 			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
1162 		}
1163 
1164 		/* if only one frame, send as non-aggregate */
1165 		if (bf == bf->bf_lastbf) {
1166 			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1167 			bf->bf_state.bf_type = BUF_AMPDU;
1168 		} else {
1169 			TX_STAT_INC(txq->axq_qnum, a_aggr);
1170 		}
1171 
1172 		ath_tx_fill_desc(sc, bf, txq, aggr_len);
1173 		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1174 	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
1175 		 status != ATH_AGGR_BAW_CLOSED);
1176 }
1177 
1178 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1179 		      u16 tid, u16 *ssn)
1180 {
1181 	struct ath_atx_tid *txtid;
1182 	struct ath_node *an;
1183 
1184 	an = (struct ath_node *)sta->drv_priv;
1185 	txtid = ATH_AN_2_TID(an, tid);
1186 
1187 	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1188 		return -EAGAIN;
1189 
1190 	txtid->state |= AGGR_ADDBA_PROGRESS;
1191 	txtid->paused = true;
1192 	*ssn = txtid->seq_start = txtid->seq_next;
1193 	txtid->bar_index = -1;
1194 
1195 	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1196 	txtid->baw_head = txtid->baw_tail = 0;
1197 
1198 	return 0;
1199 }
1200 
1201 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1202 {
1203 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
1204 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
1205 	struct ath_txq *txq = txtid->ac->txq;
1206 
1207 	if (txtid->state & AGGR_CLEANUP)
1208 		return;
1209 
1210 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
1211 		txtid->state &= ~AGGR_ADDBA_PROGRESS;
1212 		return;
1213 	}
1214 
1215 	ath_txq_lock(sc, txq);
1216 	txtid->paused = true;
1217 
1218 	/*
1219 	 * If frames are still being transmitted for this TID, they will be
1220 	 * cleaned up during tx completion. To prevent race conditions, this
1221 	 * TID can only be reused after all in-progress subframes have been
1222 	 * completed.
1223 	 */
1224 	if (txtid->baw_head != txtid->baw_tail)
1225 		txtid->state |= AGGR_CLEANUP;
1226 	else
1227 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
1228 
1229 	ath_tx_flush_tid(sc, txtid);
1230 	ath_txq_unlock_complete(sc, txq);
1231 }
1232 
1233 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1234 		       struct ath_node *an)
1235 {
1236 	struct ath_atx_tid *tid;
1237 	struct ath_atx_ac *ac;
1238 	struct ath_txq *txq;
1239 	bool buffered;
1240 	int tidno;
1241 
1242 	for (tidno = 0, tid = &an->tid[tidno];
1243 	     tidno < WME_NUM_TID; tidno++, tid++) {
1244 
1245 		ac = tid->ac;
1246 		txq = ac->txq;
1247 
1248 		ath_txq_lock(sc, txq);
1249 
1250 		if (!tid->sched) {
1251 			ath_txq_unlock(sc, txq);
1252 			continue;
1253 		}
1254 
1255 		buffered = !skb_queue_empty(&tid->buf_q);
1256 
1257 		tid->sched = false;
1258 		list_del(&tid->list);
1259 
1260 		if (ac->sched) {
1261 			ac->sched = false;
1262 			list_del(&ac->list);
1263 		}
1264 
1265 		ath_txq_unlock(sc, txq);
1266 
1267 		ieee80211_sta_set_buffered(sta, tidno, buffered);
1268 	}
1269 }
1270 
1271 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1272 {
1273 	struct ath_atx_tid *tid;
1274 	struct ath_atx_ac *ac;
1275 	struct ath_txq *txq;
1276 	int tidno;
1277 
1278 	for (tidno = 0, tid = &an->tid[tidno];
1279 	     tidno < WME_NUM_TID; tidno++, tid++) {
1280 
1281 		ac = tid->ac;
1282 		txq = ac->txq;
1283 
1284 		ath_txq_lock(sc, txq);
1285 		ac->clear_ps_filter = true;
1286 
1287 		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
1288 			ath_tx_queue_tid(txq, tid);
1289 			ath_txq_schedule(sc, txq);
1290 		}
1291 
1292 		ath_txq_unlock_complete(sc, txq);
1293 	}
1294 }
1295 
1296 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1297 {
1298 	struct ath_atx_tid *txtid;
1299 	struct ath_node *an;
1300 
1301 	an = (struct ath_node *)sta->drv_priv;
1302 
1303 	txtid = ATH_AN_2_TID(an, tid);
1304 	txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1305 	txtid->state |= AGGR_ADDBA_COMPLETE;
1306 	txtid->state &= ~AGGR_ADDBA_PROGRESS;
1307 	ath_tx_resume_tid(sc, txtid);
1308 }
1309 
1310 /********************/
1311 /* Queue Management */
1312 /********************/
1313 
1314 static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1315 					  struct ath_txq *txq)
1316 {
1317 	struct ath_atx_ac *ac, *ac_tmp;
1318 	struct ath_atx_tid *tid, *tid_tmp;
1319 
1320 	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1321 		list_del(&ac->list);
1322 		ac->sched = false;
1323 		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1324 			list_del(&tid->list);
1325 			tid->sched = false;
1326 			ath_tid_drain(sc, txq, tid);
1327 		}
1328 	}
1329 }
1330 
1331 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1332 {
1333 	struct ath_hw *ah = sc->sc_ah;
1334 	struct ath9k_tx_queue_info qi;
1335 	static const int subtype_txq_to_hwq[] = {
1336 		[WME_AC_BE] = ATH_TXQ_AC_BE,
1337 		[WME_AC_BK] = ATH_TXQ_AC_BK,
1338 		[WME_AC_VI] = ATH_TXQ_AC_VI,
1339 		[WME_AC_VO] = ATH_TXQ_AC_VO,
1340 	};
1341 	int axq_qnum, i;
1342 
1343 	memset(&qi, 0, sizeof(qi));
1344 	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
1345 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1346 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1347 	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1348 	qi.tqi_physCompBuf = 0;
1349 
1350 	/*
1351 	 * Enable interrupts only for EOL and DESC conditions.
1352 	 * We mark tx descriptors to receive a DESC interrupt
1353 	 * when a tx queue gets deep; otherwise waiting for the
1354 	 * EOL to reap descriptors.  Note that this is done to
1355 	 * reduce interrupt load and this only defers reaping
1356 	 * descriptors, never transmitting frames.  Aside from
1357 	 * reducing interrupts this also permits more concurrency.
1358 	 * The only potential downside is if the tx queue backs
1359 	 * up, in which case the top half of the kernel may back up
1360 	 * due to a lack of tx descriptors.
1361 	 *
1362 	 * The UAPSD queue is an exception, since we take a desc-
1363 	 * based intr on the EOSP frames.
1364 	 */
1365 	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1366 		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
1367 	} else {
1368 		if (qtype == ATH9K_TX_QUEUE_UAPSD)
1369 			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1370 		else
1371 			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1372 					TXQ_FLAG_TXDESCINT_ENABLE;
1373 	}
1374 	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1375 	if (axq_qnum == -1) {
1376 		/*
1377 		 * NB: don't print a message, this happens
1378 		 * normally on parts with too few tx queues
1379 		 */
1380 		return NULL;
1381 	}
1382 	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1383 		struct ath_txq *txq = &sc->tx.txq[axq_qnum];
1384 
1385 		txq->axq_qnum = axq_qnum;
1386 		txq->mac80211_qnum = -1;
1387 		txq->axq_link = NULL;
1388 		__skb_queue_head_init(&txq->complete_q);
1389 		INIT_LIST_HEAD(&txq->axq_q);
1390 		INIT_LIST_HEAD(&txq->axq_acq);
1391 		spin_lock_init(&txq->axq_lock);
1392 		txq->axq_depth = 0;
1393 		txq->axq_ampdu_depth = 0;
1394 		txq->axq_tx_inprogress = false;
1395 		sc->tx.txqsetup |= 1<<axq_qnum;
1396 
1397 		txq->txq_headidx = txq->txq_tailidx = 0;
1398 		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1399 			INIT_LIST_HEAD(&txq->txq_fifo[i]);
1400 	}
1401 	return &sc->tx.txq[axq_qnum];
1402 }
1403 
1404 int ath_txq_update(struct ath_softc *sc, int qnum,
1405 		   struct ath9k_tx_queue_info *qinfo)
1406 {
1407 	struct ath_hw *ah = sc->sc_ah;
1408 	int error = 0;
1409 	struct ath9k_tx_queue_info qi;
1410 
1411 	if (qnum == sc->beacon.beaconq) {
1412 		/*
1413 		 * XXX: for beacon queue, we just save the parameter.
1414 		 * It will be picked up by ath_beaconq_config when
1415 		 * it's necessary.
1416 		 */
1417 		sc->beacon.beacon_qi = *qinfo;
1418 		return 0;
1419 	}
1420 
1421 	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
1422 
1423 	ath9k_hw_get_txq_props(ah, qnum, &qi);
1424 	qi.tqi_aifs = qinfo->tqi_aifs;
1425 	qi.tqi_cwmin = qinfo->tqi_cwmin;
1426 	qi.tqi_cwmax = qinfo->tqi_cwmax;
1427 	qi.tqi_burstTime = qinfo->tqi_burstTime;
1428 	qi.tqi_readyTime = qinfo->tqi_readyTime;
1429 
1430 	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1431 		ath_err(ath9k_hw_common(sc->sc_ah),
1432 			"Unable to update hardware queue %u!\n", qnum);
1433 		error = -EIO;
1434 	} else {
1435 		ath9k_hw_resettxqueue(ah, qnum);
1436 	}
1437 
1438 	return error;
1439 }
1440 
1441 int ath_cabq_update(struct ath_softc *sc)
1442 {
1443 	struct ath9k_tx_queue_info qi;
1444 	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1445 	int qnum = sc->beacon.cabq->axq_qnum;
1446 
1447 	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1448 	/*
1449 	 * Ensure the readytime % is within the bounds.
1450 	 */
1451 	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1452 		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1453 	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1454 		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1455 
1456 	qi.tqi_readyTime = (cur_conf->beacon_interval *
1457 			    sc->config.cabqReadytime) / 100;
1458 	ath_txq_update(sc, qnum, &qi);
1459 
1460 	return 0;
1461 }
1462 
1463 static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1464 {
1465 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1466 	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1467 }
1468 
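/*
 * Complete every buffer on the given descriptor list with a flush status,
 * returning stale holding descriptors to the free list.
 */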
1469 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1470 			       struct list_head *list, bool retry_tx)
1471 {
1472 	struct ath_buf *bf, *lastbf;
1473 	struct list_head bf_head;
1474 	struct ath_tx_status ts;
1475 
1476 	memset(&ts, 0, sizeof(ts));
1477 	ts.ts_status = ATH9K_TX_FLUSH;
1478 	INIT_LIST_HEAD(&bf_head);
1479 
1480 	while (!list_empty(list)) {
1481 		bf = list_first_entry(list, struct ath_buf, list);
1482 
1483 		if (bf->bf_stale) {
1484 			list_del(&bf->list);
1485 
1486 			ath_tx_return_buffer(sc, bf);
1487 			continue;
1488 		}
1489 
1490 		lastbf = bf->bf_lastbf;
1491 		list_cut_position(&bf_head, list, &lastbf->list);
1492 
1493 		txq->axq_depth--;
1494 		if (bf_is_ampdu_not_probing(bf))
1495 			txq->axq_ampdu_depth--;
1496 
1497 		if (bf_isampdu(bf))
1498 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1499 					     retry_tx);
1500 		else
1501 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
1502 	}
1503 }
1504 
1505 /*
1506  * Drain a given TX queue (could be Beacon or Data)
1507  *
1508  * This assumes output has been stopped and
1509  * we do not need to block ath_tx_tasklet.
1510  */
1511 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1512 {
1513 	ath_txq_lock(sc, txq);
1514 
1515 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1516 		int idx = txq->txq_tailidx;
1517 
1518 		while (!list_empty(&txq->txq_fifo[idx])) {
1519 			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1520 					   retry_tx);
1521 
1522 			INCR(idx, ATH_TXFIFO_DEPTH);
1523 		}
1524 		txq->txq_tailidx = idx;
1525 	}
1526 
1527 	txq->axq_link = NULL;
1528 	txq->axq_tx_inprogress = false;
1529 	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
1530 
1531 	/* flush any pending frames if aggregation is enabled */
1532 	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
1533 		ath_txq_drain_pending_buffers(sc, txq);
1534 
1535 	ath_txq_unlock_complete(sc, txq);
1536 }
1537 
1538 bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1539 {
1540 	struct ath_hw *ah = sc->sc_ah;
1541 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1542 	struct ath_txq *txq;
1543 	int i;
1544 	u32 npend = 0;
1545 
1546 	if (sc->sc_flags & SC_OP_INVALID)
1547 		return true;
1548 
1549 	ath9k_hw_abort_tx_dma(ah);
1550 
1551 	/* Check if any queue remains active */
1552 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1553 		if (!ATH_TXQ_SETUP(sc, i))
1554 			continue;
1555 
1556 		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1557 			npend |= BIT(i);
1558 	}
1559 
1560 	if (npend)
1561 		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
1562 
1563 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1564 		if (!ATH_TXQ_SETUP(sc, i))
1565 			continue;
1566 
1567 		/*
1568 		 * The caller will resume queues with ieee80211_wake_queues.
1569 		 * Mark the queue as not stopped to prevent ath_tx_complete
1570 		 * from waking the queue too early.
1571 		 */
1572 		txq = &sc->tx.txq[i];
1573 		txq->stopped = false;
1574 		ath_draintxq(sc, txq, retry_tx);
1575 	}
1576 
1577 	return !npend;
1578 }
1579 
1580 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1581 {
1582 	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1583 	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1584 }
1585 
1586 /* For each axq_acq entry, for each tid, try to schedule packets
1587  * for transmit until ampdu_depth has reached min Q depth.
1588  */
1589 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1590 {
1591 	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1592 	struct ath_atx_tid *tid, *last_tid;
1593 
1594 	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
1595 	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1596 		return;
1597 
1598 	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1599 	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1600 
1601 	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1602 		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1603 		list_del(&ac->list);
1604 		ac->sched = false;
1605 
1606 		while (!list_empty(&ac->tid_q)) {
1607 			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1608 					       list);
1609 			list_del(&tid->list);
1610 			tid->sched = false;
1611 
1612 			if (tid->paused)
1613 				continue;
1614 
1615 			ath_tx_sched_aggr(sc, txq, tid);
1616 
1617 			/*
1618 			 * add tid to round-robin queue if more frames
1619 			 * are pending for the tid
1620 			 */
1621 			if (!skb_queue_empty(&tid->buf_q))
1622 				ath_tx_queue_tid(txq, tid);
1623 
1624 			if (tid == last_tid ||
1625 			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1626 				break;
1627 		}
1628 
1629 		if (!list_empty(&ac->tid_q) && !ac->sched) {
1630 			ac->sched = true;
1631 			list_add_tail(&ac->list, &txq->axq_acq);
1632 		}
1633 
1634 		if (ac == last_ac ||
1635 		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1636 			return;
1637 	}
1638 }
1639 
1640 /***********/
1641 /* TX, DMA */
1642 /***********/
1643 
1644 /*
1645  * Insert a chain of ath_buf (descriptors) on a txq and
1646  * assume the descriptors are already chained together by the caller.
1647  */
1648 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1649 			     struct list_head *head, bool internal)
1650 {
1651 	struct ath_hw *ah = sc->sc_ah;
1652 	struct ath_common *common = ath9k_hw_common(ah);
1653 	struct ath_buf *bf, *bf_last;
1654 	bool puttxbuf = false;
1655 	bool edma;
1656 
1657 	/*
1658 	 * Insert the frame on the outbound list and
1659 	 * pass it on to the hardware.
1660 	 */
1661 
1662 	if (list_empty(head))
1663 		return;
1664 
1665 	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1666 	bf = list_first_entry(head, struct ath_buf, list);
1667 	bf_last = list_entry(head->prev, struct ath_buf, list);
1668 
1669 	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
1670 		txq->axq_qnum, txq->axq_depth);
1671 
1672 	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1673 		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
1674 		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1675 		puttxbuf = true;
1676 	} else {
1677 		list_splice_tail_init(head, &txq->axq_q);
1678 
1679 		if (txq->axq_link) {
1680 			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
1681 			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
1682 				txq->axq_qnum, txq->axq_link,
1683 				ito64(bf->bf_daddr), bf->bf_desc);
1684 		} else if (!edma)
1685 			puttxbuf = true;
1686 
1687 		txq->axq_link = bf_last->bf_desc;
1688 	}
1689 
1690 	if (puttxbuf) {
1691 		TX_STAT_INC(txq->axq_qnum, puttxbuf);
1692 		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1693 		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
1694 			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1695 	}
1696 
1697 	if (!edma) {
1698 		TX_STAT_INC(txq->axq_qnum, txstart);
1699 		ath9k_hw_txstart(ah, txq->axq_qnum);
1700 	}
1701 
1702 	if (!internal) {
1703 		txq->axq_depth++;
1704 		if (bf_is_ampdu_not_probing(bf))
1705 			txq->axq_ampdu_depth++;
1706 	}
1707 }
1708 
1709 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1710 			      struct sk_buff *skb, struct ath_tx_control *txctl)
1711 {
1712 	struct ath_frame_info *fi = get_frame_info(skb);
1713 	struct list_head bf_head;
1714 	struct ath_buf *bf;
1715 
1716 	/*
1717 	 * Do not queue to h/w when any of the following conditions is true:
1718 	 * - there are pending frames in software queue
1719 	 * - the TID is currently paused for ADDBA/BAR request
1720 	 * - seqno is not within block-ack window
1721 	 * - h/w queue depth exceeds low water mark
1722 	 */
1723 	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
1724 	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
1725 	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
1726 		/*
1727 		 * Add this frame to software queue for scheduling later
1728 		 * for aggregation.
1729 		 */
1730 		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
1731 		__skb_queue_tail(&tid->buf_q, skb);
1732 		if (!txctl->an || !txctl->an->sleeping)
1733 			ath_tx_queue_tid(txctl->txq, tid);
1734 		return;
1735 	}
1736 
1737 	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1738 	if (!bf) {
1739 		ieee80211_free_txskb(sc->hw, skb);
1740 		return;
1741 	}
1742 
1743 	bf->bf_state.bf_type = BUF_AMPDU;
1744 	INIT_LIST_HEAD(&bf_head);
1745 	list_add(&bf->list, &bf_head);
1746 
1747 	/* Add sub-frame to BAW */
1748 	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
1749 
1750 	/* Queue to h/w without aggregation */
1751 	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
1752 	bf->bf_lastbf = bf;
1753 	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
1754 	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
1755 }
1756 
1757 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1758 			       struct ath_atx_tid *tid, struct sk_buff *skb)
1759 {
1760 	struct ath_frame_info *fi = get_frame_info(skb);
1761 	struct list_head bf_head;
1762 	struct ath_buf *bf;
1763 
1764 	bf = fi->bf;
1765 
1766 	INIT_LIST_HEAD(&bf_head);
1767 	list_add_tail(&bf->list, &bf_head);
1768 	bf->bf_state.bf_type = 0;
1769 
1770 	bf->bf_next = NULL;
1771 	bf->bf_lastbf = bf;
1772 	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1773 	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
1774 	TX_STAT_INC(txq->axq_qnum, queued);
1775 }
1776 
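/*
 * Fill the per-frame ath_frame_info from the mac80211 tx info: hardware
 * key index and key type, frame length and the RTS/CTS rate to use.
 */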
1777 static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1778 			     int framelen)
1779 {
1780 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1781 	struct ieee80211_sta *sta = tx_info->control.sta;
1782 	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1783 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1784 	const struct ieee80211_rate *rate;
1785 	struct ath_frame_info *fi = get_frame_info(skb);
1786 	struct ath_node *an = NULL;
1787 	enum ath9k_key_type keytype;
1788 	bool short_preamble = false;
1789 
1790 	/*
1791 	 * Check whether short preamble is needed for the CTS rate by
1792 	 * looking at the BSS's global flag; the rate series instead uses
1793 	 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE.
1794 	 */
1795 	if (tx_info->control.vif &&
1796 	    tx_info->control.vif->bss_conf.use_short_preamble)
1797 		short_preamble = true;
1798 
1799 	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
1800 	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1801 
1802 	if (sta)
1803 		an = (struct ath_node *) sta->drv_priv;
1804 
1805 	memset(fi, 0, sizeof(*fi));
1806 	if (hw_key)
1807 		fi->keyix = hw_key->hw_key_idx;
1808 	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1809 		fi->keyix = an->ps_key;
1810 	else
1811 		fi->keyix = ATH9K_TXKEYIX_INVALID;
1812 	fi->keytype = keytype;
1813 	fi->framelen = framelen;
1814 	fi->rtscts_rate = rate->hw_value;
1815 	if (short_preamble)
1816 		fi->rtscts_rate |= rate->hw_value_short;
1817 }
1818 
1819 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1820 {
1821 	struct ath_hw *ah = sc->sc_ah;
1822 	struct ath9k_channel *curchan = ah->curchan;
1823 	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1824 	    (curchan->channelFlags & CHANNEL_5GHZ) &&
1825 	    (chainmask == 0x7) && (rate < 0x90))
1826 		return 0x3;
1827 	else
1828 		return chainmask;
1829 }
1830 
1831 /*
1832  * Assign a descriptor (and a sequence number if necessary),
1833  * and map the buffer for DMA. Frees the skb on error.
1834  */
1835 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1836 					   struct ath_txq *txq,
1837 					   struct ath_atx_tid *tid,
1838 					   struct sk_buff *skb)
1839 {
1840 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1841 	struct ath_frame_info *fi = get_frame_info(skb);
1842 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1843 	struct ath_buf *bf;
1844 	int fragno;
1845 	u16 seqno;
1846 
1847 	bf = ath_tx_get_buffer(sc);
1848 	if (!bf) {
1849 		ath_dbg(common, XMIT, "TX buffers are full\n");
1850 		return NULL;
1851 	}
1852 
1853 	ATH_TXBUF_RESET(bf);
1854 
1855 	if (tid) {
1856 		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1857 		seqno = tid->seq_next;
1858 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1859 
1860 		if (fragno)
1861 			hdr->seq_ctrl |= cpu_to_le16(fragno);
1862 
1863 		if (!ieee80211_has_morefrags(hdr->frame_control))
1864 			INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1865 
1866 		bf->bf_state.seqno = seqno;
1867 	}
1868 
1869 	bf->bf_mpdu = skb;
1870 
1871 	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1872 					 skb->len, DMA_TO_DEVICE);
1873 	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
1874 		bf->bf_mpdu = NULL;
1875 		bf->bf_buf_addr = 0;
1876 		ath_err(ath9k_hw_common(sc->sc_ah),
1877 			"dma_mapping_error() on TX\n");
1878 		ath_tx_return_buffer(sc, bf);
1879 		return NULL;
1880 	}
1881 
1882 	fi->bf = bf;
1883 
1884 	return bf;
1885 }
1886 
1887 /* FIXME: tx power */
1888 static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1889 			     struct ath_tx_control *txctl)
1890 {
1891 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1892 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1893 	struct ath_atx_tid *tid = NULL;
1894 	struct ath_buf *bf;
1895 	u8 tidno;
1896 
1897 	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
1898 		ieee80211_is_data_qos(hdr->frame_control)) {
1899 		tidno = ieee80211_get_qos_ctl(hdr)[0] &
1900 			IEEE80211_QOS_CTL_TID_MASK;
1901 		tid = ATH_AN_2_TID(txctl->an, tidno);
1902 
1903 		WARN_ON(tid->ac->txq != txctl->txq);
1904 	}
1905 
1906 	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
1907 		/*
1908 		 * Try aggregation if it's a unicast data frame
1909 		 * and the destination is HT capable.
1910 		 */
1911 		ath_tx_send_ampdu(sc, tid, skb, txctl);
1912 	} else {
1913 		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1914 		if (!bf) {
1915 			if (txctl->paprd)
1916 				dev_kfree_skb_any(skb);
1917 			else
1918 				ieee80211_free_txskb(sc->hw, skb);
1919 			return;
1920 		}
1921 
1922 		bf->bf_state.bfs_paprd = txctl->paprd;
1923 
1924 		if (txctl->paprd)
1925 			bf->bf_state.bfs_paprd_timestamp = jiffies;
1926 
1927 		ath_tx_send_normal(sc, txctl->txq, tid, skb);
1928 	}
1929 }
1930 
1931 /* Upon failure caller should free skb */
1932 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1933 		 struct ath_tx_control *txctl)
1934 {
1935 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1936 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1937 	struct ieee80211_sta *sta = info->control.sta;
1938 	struct ieee80211_vif *vif = info->control.vif;
1939 	struct ath_softc *sc = hw->priv;
1940 	struct ath_txq *txq = txctl->txq;
1941 	int padpos, padsize;
1942 	int frmlen = skb->len + FCS_LEN;
1943 	int q;
1944 
1945 	/* NOTE:  sta can be NULL according to net/mac80211.h */
1946 	if (sta)
1947 		txctl->an = (struct ath_node *)sta->drv_priv;
1948 
1949 	if (info->control.hw_key)
1950 		frmlen += info->control.hw_key->icv_len;
1951 
1952 	/*
1953 	 * As a temporary workaround, assign seq# here; this will likely need
1954 	 * to be cleaned up to work better with Beacon transmission and virtual
1955 	 * BSSes.
1956 	 */
1957 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1958 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1959 			sc->tx.seq_no += 0x10;
1960 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1961 		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1962 	}
1963 
1964 	/* Add the padding after the header if this is not already done */
1965 	padpos = ath9k_cmn_padpos(hdr->frame_control);
1966 	padsize = padpos & 3;
1967 	if (padsize && skb->len > padpos) {
1968 		if (skb_headroom(skb) < padsize)
1969 			return -ENOMEM;
1970 
1971 		skb_push(skb, padsize);
1972 		memmove(skb->data, skb->data + padsize, padpos);
1973 		hdr = (struct ieee80211_hdr *) skb->data;
1974 	}
1975 
1976 	if ((vif && vif->type != NL80211_IFTYPE_AP &&
1977 	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
1978 	    !ieee80211_is_data(hdr->frame_control))
1979 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1980 
1981 	setup_frame_info(hw, skb, frmlen);
1982 
1983 	/*
1984 	 * At this point, the vif, hw_key and sta pointers in the tx control
1985 	 * info are no longer valid (overwritten by the ath_frame_info data).
1986 	 */
1987 
1988 	q = skb_get_queue_mapping(skb);
1989 
1990 	ath_txq_lock(sc, txq);
1991 	if (txq == sc->tx.txq_map[q] &&
1992 	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1993 		ieee80211_stop_queue(sc->hw, q);
1994 		txq->stopped = true;
1995 	}
1996 
1997 	ath_tx_start_dma(sc, skb, txctl);
1998 
1999 	ath_txq_unlock(sc, txq);
2000 
2001 	return 0;
2002 }
2003 
2004 /*****************/
2005 /* TX Completion */
2006 /*****************/
2007 
2008 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2009 			    int tx_flags, struct ath_txq *txq)
2010 {
2011 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2012 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2013 	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2014 	int q, padpos, padsize;
2015 
2016 	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
2017 
2018 	if (!(tx_flags & ATH_TX_ERROR))
2019 		/* Frame was ACKed */
2020 		tx_info->flags |= IEEE80211_TX_STAT_ACK;
2021 
2022 	padpos = ath9k_cmn_padpos(hdr->frame_control);
2023 	padsize = padpos & 3;
2024 	if (padsize && skb->len > padpos + padsize) {
2025 		/*
2026 		 * Remove MAC header padding before giving the frame back to
2027 		 * mac80211.
2028 		 */
2029 		memmove(skb->data + padsize, skb->data, padpos);
2030 		skb_pull(skb, padsize);
2031 	}
2032 
2033 	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
2034 		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2035 		ath_dbg(common, PS,
2036 			"Going back to sleep after having received TX status (0x%lx)\n",
2037 			sc->ps_flags & (PS_WAIT_FOR_BEACON |
2038 					PS_WAIT_FOR_CAB |
2039 					PS_WAIT_FOR_PSPOLL_DATA |
2040 					PS_WAIT_FOR_TX_ACK));
2041 	}
2042 
2043 	q = skb_get_queue_mapping(skb);
2044 	if (txq == sc->tx.txq_map[q]) {
2045 		if (WARN_ON(--txq->pending_frames < 0))
2046 			txq->pending_frames = 0;
2047 
2048 		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2049 			ieee80211_wake_queue(sc->hw, q);
2050 			txq->stopped = false;
2051 		}
2052 	}
2053 
2054 	__skb_queue_tail(&txq->complete_q, skb);
2055 }
2056 
2057 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2058 				struct ath_txq *txq, struct list_head *bf_q,
2059 				struct ath_tx_status *ts, int txok)
2060 {
2061 	struct sk_buff *skb = bf->bf_mpdu;
2062 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2063 	unsigned long flags;
2064 	int tx_flags = 0;
2065 
2066 	if (!txok)
2067 		tx_flags |= ATH_TX_ERROR;
2068 
2069 	if (ts->ts_status & ATH9K_TXERR_FILT)
2070 		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2071 
2072 	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2073 	bf->bf_buf_addr = 0;
2074 
2075 	if (bf->bf_state.bfs_paprd) {
2076 		if (time_after(jiffies,
2077 				bf->bf_state.bfs_paprd_timestamp +
2078 				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2079 			dev_kfree_skb_any(skb);
2080 		else
2081 			complete(&sc->paprd_complete);
2082 	} else {
2083 		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2084 		ath_tx_complete(sc, skb, tx_flags, txq);
2085 	}
2086 	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2087 	 * accidentally reference it later.
2088 	 */
2089 	bf->bf_mpdu = NULL;
2090 
2091 	/*
2092 	 * Return the list of ath_buf's for this mpdu to the free queue.
2093 	 */
2094 	spin_lock_irqsave(&sc->tx.txbuflock, flags);
2095 	list_splice_tail_init(bf_q, &sc->tx.txbuf);
2096 	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2097 }
2098 
2099 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2100 			     struct ath_tx_status *ts, int nframes, int nbad,
2101 			     int txok)
2102 {
2103 	struct sk_buff *skb = bf->bf_mpdu;
2104 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2105 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2106 	struct ieee80211_hw *hw = sc->hw;
2107 	struct ath_hw *ah = sc->sc_ah;
2108 	u8 i, tx_rateindex;
2109 
2110 	if (txok)
2111 		tx_info->status.ack_signal = ts->ts_rssi;
2112 
2113 	tx_rateindex = ts->ts_rateindex;
2114 	WARN_ON(tx_rateindex >= hw->max_rates);
2115 
2116 	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2117 		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2118 
2119 		BUG_ON(nbad > nframes);
2120 	}
2121 	tx_info->status.ampdu_len = nframes;
2122 	tx_info->status.ampdu_ack_len = nframes - nbad;
2123 
2124 	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2125 	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
2126 		/*
2127 		 * If an underrun error is seen, treat it as an excessive
2128 		 * retry only if the max frame trigger level has been reached
2129 		 * (2 KB for single stream, 4 KB for dual stream).
2130 		 * Adjust the long retry count as if the frame had been tried
2131 		 * hw->max_rate_tries times, to affect how rate control updates
2132 		 * PER for the failed rate.
2133 		 * In case of congestion on the bus, penalizing this type of
2134 		 * underrun should help the hardware actually transmit new
2135 		 * frames successfully by eventually preferring slower rates.
2136 		 * This in turn should also alleviate congestion on the bus.
2137 		 */
2138 		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2139 		                             ATH9K_TX_DELIM_UNDERRUN)) &&
2140 		    ieee80211_is_data(hdr->frame_control) &&
2141 		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2142 			tx_info->status.rates[tx_rateindex].count =
2143 				hw->max_rate_tries;
2144 	}
2145 
2146 	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2147 		tx_info->status.rates[i].count = 0;
2148 		tx_info->status.rates[i].idx = -1;
2149 	}
2150 
2151 	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2152 }
2153 
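/*
 * Common completion path for a transmit unit: update the queue depth
 * counters, report status for single frames or complete the aggregate,
 * and reschedule the queue when HT is supported.
 */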
2154 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2155 				  struct ath_tx_status *ts, struct ath_buf *bf,
2156 				  struct list_head *bf_head)
2157 {
2158 	int txok;
2159 
2160 	txq->axq_depth--;
2161 	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2162 	txq->axq_tx_inprogress = false;
2163 	if (bf_is_ampdu_not_probing(bf))
2164 		txq->axq_ampdu_depth--;
2165 
2166 	if (!bf_isampdu(bf)) {
2167 		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
2168 		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
2169 	} else
2170 		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2171 
2172 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
2173 		ath_txq_schedule(sc, txq);
2174 }
2175 
2176 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2177 {
2178 	struct ath_hw *ah = sc->sc_ah;
2179 	struct ath_common *common = ath9k_hw_common(ah);
2180 	struct ath_buf *bf, *lastbf, *bf_held = NULL;
2181 	struct list_head bf_head;
2182 	struct ath_desc *ds;
2183 	struct ath_tx_status ts;
2184 	int status;
2185 
2186 	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
2187 		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2188 		txq->axq_link);
2189 
2190 	ath_txq_lock(sc, txq);
2191 	for (;;) {
2192 		if (work_pending(&sc->hw_reset_work))
2193 			break;
2194 
2195 		if (list_empty(&txq->axq_q)) {
2196 			txq->axq_link = NULL;
2197 			if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
2198 				ath_txq_schedule(sc, txq);
2199 			break;
2200 		}
2201 		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2202 
2203 		/*
2204 		 * There is a race condition in which a BH gets scheduled
2205 		 * after sw writes TxE and before hw reloads the last
2206 		 * descriptor to get the newly chained one.
2207 		 * Software must keep the last DONE descriptor as a
2208 		 * holding descriptor - software does so by marking
2209 		 * it with the STALE flag.
2210 		 */
2211 		bf_held = NULL;
2212 		if (bf->bf_stale) {
2213 			bf_held = bf;
2214 			if (list_is_last(&bf_held->list, &txq->axq_q))
2215 				break;
2216 
2217 			bf = list_entry(bf_held->list.next, struct ath_buf,
2218 					list);
2219 		}
2220 
2221 		lastbf = bf->bf_lastbf;
2222 		ds = lastbf->bf_desc;
2223 
2224 		memset(&ts, 0, sizeof(ts));
2225 		status = ath9k_hw_txprocdesc(ah, ds, &ts);
2226 		if (status == -EINPROGRESS)
2227 			break;
2228 
2229 		TX_STAT_INC(txq->axq_qnum, txprocdesc);
2230 
2231 		/*
2232 		 * Remove ath_buf's of the same transmit unit from txq,
2233 		 * however leave the last descriptor back as the holding
2234 		 * descriptor for hw.
2235 		 */
2236 		lastbf->bf_stale = true;
2237 		INIT_LIST_HEAD(&bf_head);
2238 		if (!list_is_singular(&lastbf->list))
2239 			list_cut_position(&bf_head,
2240 				&txq->axq_q, lastbf->list.prev);
2241 
2242 		if (bf_held) {
2243 			list_del(&bf_held->list);
2244 			ath_tx_return_buffer(sc, bf_held);
2245 		}
2246 
2247 		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2248 	}
2249 	ath_txq_unlock_complete(sc, txq);
2250 }
2251 
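/*
 * Periodic TX watchdog: if a queue still has pending descriptors and no
 * completion has been observed since the previous poll, assume the
 * hardware is hung and schedule a chip reset.
 */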
2252 static void ath_tx_complete_poll_work(struct work_struct *work)
2253 {
2254 	struct ath_softc *sc = container_of(work, struct ath_softc,
2255 			tx_complete_work.work);
2256 	struct ath_txq *txq;
2257 	int i;
2258 	bool needreset = false;
2259 #ifdef CONFIG_ATH9K_DEBUGFS
2260 	sc->tx_complete_poll_work_seen++;
2261 #endif
2262 
2263 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2264 		if (ATH_TXQ_SETUP(sc, i)) {
2265 			txq = &sc->tx.txq[i];
2266 			ath_txq_lock(sc, txq);
2267 			if (txq->axq_depth) {
2268 				if (txq->axq_tx_inprogress) {
2269 					needreset = true;
2270 					ath_txq_unlock(sc, txq);
2271 					break;
2272 				} else {
2273 					txq->axq_tx_inprogress = true;
2274 				}
2275 			}
2276 			ath_txq_unlock_complete(sc, txq);
2277 		}
2278 
2279 	if (needreset) {
2280 		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
2281 			"tx hung, resetting the chip\n");
2282 		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
2283 		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
2284 	}
2285 
2286 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2287 			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2288 }
2289 
2290 
2291 
2292 void ath_tx_tasklet(struct ath_softc *sc)
2293 {
2294 	struct ath_hw *ah = sc->sc_ah;
2295 	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
2296 	int i;
2297 
2298 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2299 		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2300 			ath_tx_processq(sc, &sc->tx.txq[i]);
2301 	}
2302 }
2303 
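/*
 * TX completion handling for EDMA chips: drain the TX status ring,
 * handle beacon queue completions separately and complete buffers from
 * the per-queue FIFO.
 */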
2304 void ath_tx_edma_tasklet(struct ath_softc *sc)
2305 {
2306 	struct ath_tx_status ts;
2307 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2308 	struct ath_hw *ah = sc->sc_ah;
2309 	struct ath_txq *txq;
2310 	struct ath_buf *bf, *lastbf;
2311 	struct list_head bf_head;
2312 	int status;
2313 
2314 	for (;;) {
2315 		if (work_pending(&sc->hw_reset_work))
2316 			break;
2317 
2318 		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
2319 		if (status == -EINPROGRESS)
2320 			break;
2321 		if (status == -EIO) {
2322 			ath_dbg(common, XMIT, "Error processing tx status\n");
2323 			break;
2324 		}
2325 
2326 		/* Process beacon completions separately */
2327 		if (ts.qid == sc->beacon.beaconq) {
2328 			sc->beacon.tx_processed = true;
2329 			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2330 			continue;
2331 		}
2332 
2333 		txq = &sc->tx.txq[ts.qid];
2334 
2335 		ath_txq_lock(sc, txq);
2336 
2337 		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2338 			ath_txq_unlock(sc, txq);
2339 			return;
2340 		}
2341 
2342 		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2343 				      struct ath_buf, list);
2344 		lastbf = bf->bf_lastbf;
2345 
2346 		INIT_LIST_HEAD(&bf_head);
2347 		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2348 				  &lastbf->list);
2349 
2350 		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2351 			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2352 
2353 			if (!list_empty(&txq->axq_q)) {
2354 				struct list_head bf_q;
2355 
2356 				INIT_LIST_HEAD(&bf_q);
2357 				txq->axq_link = NULL;
2358 				list_splice_tail_init(&txq->axq_q, &bf_q);
2359 				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2360 			}
2361 		}
2362 
2363 		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2364 		ath_txq_unlock_complete(sc, txq);
2365 	}
2366 }
2367 
2368 /*****************/
2369 /* Init, Cleanup */
2370 /*****************/
2371 
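/* Allocate a coherent DMA buffer for the TX status descriptor ring. */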
2372 static int ath_txstatus_setup(struct ath_softc *sc, int size)
2373 {
2374 	struct ath_descdma *dd = &sc->txsdma;
2375 	u8 txs_len = sc->sc_ah->caps.txs_len;
2376 
2377 	dd->dd_desc_len = size * txs_len;
2378 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2379 					 &dd->dd_desc_paddr, GFP_KERNEL);
2380 	if (!dd->dd_desc)
2381 		return -ENOMEM;
2382 
2383 	return 0;
2384 }
2385 
2386 static int ath_tx_edma_init(struct ath_softc *sc)
2387 {
2388 	int err;
2389 
2390 	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2391 	if (!err)
2392 		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2393 					  sc->txsdma.dd_desc_paddr,
2394 					  ATH_TXSTATUS_RING_SIZE);
2395 
2396 	return err;
2397 }
2398 
2399 static void ath_tx_edma_cleanup(struct ath_softc *sc)
2400 {
2401 	struct ath_descdma *dd = &sc->txsdma;
2402 
2403 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2404 			  dd->dd_desc_paddr);
2405 }
2406 
2407 int ath_tx_init(struct ath_softc *sc, int nbufs)
2408 {
2409 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2410 	int error = 0;
2411 
2412 	spin_lock_init(&sc->tx.txbuflock);
2413 
2414 	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2415 				  "tx", nbufs, 1, 1);
2416 	if (error != 0) {
2417 		ath_err(common,
2418 			"Failed to allocate tx descriptors: %d\n", error);
2419 		goto err;
2420 	}
2421 
2422 	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2423 				  "beacon", ATH_BCBUF, 1, 1);
2424 	if (error != 0) {
2425 		ath_err(common,
2426 			"Failed to allocate beacon descriptors: %d\n", error);
2427 		goto err;
2428 	}
2429 
2430 	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2431 
2432 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2433 		error = ath_tx_edma_init(sc);
2434 		if (error)
2435 			goto err;
2436 	}
2437 
2438 err:
2439 	if (error != 0)
2440 		ath_tx_cleanup(sc);
2441 
2442 	return error;
2443 }
2444 
2445 void ath_tx_cleanup(struct ath_softc *sc)
2446 {
2447 	if (sc->beacon.bdma.dd_desc_len != 0)
2448 		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2449 
2450 	if (sc->tx.txdma.dd_desc_len != 0)
2451 		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2452 
2453 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2454 		ath_tx_edma_cleanup(sc);
2455 }
2456 
2457 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2458 {
2459 	struct ath_atx_tid *tid;
2460 	struct ath_atx_ac *ac;
2461 	int tidno, acno;
2462 
2463 	for (tidno = 0, tid = &an->tid[tidno];
2464 	     tidno < WME_NUM_TID;
2465 	     tidno++, tid++) {
2466 		tid->an        = an;
2467 		tid->tidno     = tidno;
2468 		tid->seq_start = tid->seq_next = 0;
2469 		tid->baw_size  = WME_MAX_BA;
2470 		tid->baw_head  = tid->baw_tail = 0;
2471 		tid->sched     = false;
2472 		tid->paused    = false;
2473 		tid->state &= ~AGGR_CLEANUP;
2474 		__skb_queue_head_init(&tid->buf_q);
2475 		acno = TID_TO_WME_AC(tidno);
2476 		tid->ac = &an->ac[acno];
2477 		tid->state &= ~AGGR_ADDBA_COMPLETE;
2478 		tid->state &= ~AGGR_ADDBA_PROGRESS;
2479 	}
2480 
2481 	for (acno = 0, ac = &an->ac[acno];
2482 	     acno < WME_NUM_AC; acno++, ac++) {
2483 		ac->sched    = false;
2484 		ac->clear_ps_filter = true;
2485 		ac->txq = sc->tx.txq_map[acno];
2486 		INIT_LIST_HEAD(&ac->tid_q);
2487 	}
2488 }
2489 
2490 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2491 {
2492 	struct ath_atx_ac *ac;
2493 	struct ath_atx_tid *tid;
2494 	struct ath_txq *txq;
2495 	int tidno;
2496 
2497 	for (tidno = 0, tid = &an->tid[tidno];
2498 	     tidno < WME_NUM_TID; tidno++, tid++) {
2499 
2500 		ac = tid->ac;
2501 		txq = ac->txq;
2502 
2503 		ath_txq_lock(sc, txq);
2504 
2505 		if (tid->sched) {
2506 			list_del(&tid->list);
2507 			tid->sched = false;
2508 		}
2509 
2510 		if (ac->sched) {
2511 			list_del(&ac->list);
2512 			tid->ac->sched = false;
2513 		}
2514 
2515 		ath_tid_drain(sc, txq, tid);
2516 		tid->state &= ~AGGR_ADDBA_COMPLETE;
2517 		tid->state &= ~AGGR_CLEANUP;
2518 
2519 		ath_txq_unlock(sc, txq);
2520 	}
2521 }
2522