/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

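/* Access the ath_buf backpointer that is stashed in the skb's control block (cb). */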
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

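/*
 * The alternate antenna wins if its averaged RSSI beats the main antenna's
 * by more than maxdelta while the alt receive ratio is already high, or by
 * more than mindelta regardless of the ratio; either way at least 50
 * packets must have been sampled.
 */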
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		(alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

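	/*
	 * First buffer in the chain: hand its descriptor straight to the
	 * hardware; otherwise append it by pointing the previous
	 * descriptor's link field at it.
	 */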
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

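	/*
	 * On EDMA chips the hardware prepends an rx status block of
	 * rx_status_len bytes to each received frame (rx_bufsize already
	 * accounts for it), so advertise a buffer size that leaves room
	 * for the status block.
	 */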
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						common->rx_bufsize,
						DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

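	/* Walk the beacon's information elements (ID, length, payload). */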
	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO:  This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
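	/* (CAB: "content after beacon", the buffered bc/mc burst that
	 * follows a DTIM beacon) */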
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
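	/*
	 * True for multicast frames, or for frames whose hw key index maps
	 * to a TKIP key; used below to decide whether the hw MIC-error bit
	 * can be trusted.
	 */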
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
		test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

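	/*
	 * Bit 7 of rs_rate flags an HT (MCS) rate; the low seven bits then
	 * carry the MCS index. Legacy rates are instead matched against the
	 * band's bitrate table by hw value (long or short preamble).
	 */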
	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if (ah->opmode != NL80211_IFTYPE_STATION)
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO:  This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					      ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For decrypt or demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it a missing decryption key or a real decryption error. This lets us
 * keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * Everything but the rate is checked here; the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
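	/*
	 * Illustrative example: a 26-byte QoS data header gives
	 * padpos = 26 and padsize = 2, so the two hw pad bytes between
	 * header and payload are stripped below; a 24-byte header gives
	 * padsize = 0 and the frame is left untouched.
	 */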
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

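	/*
	 * The switch value packs the main LNA configuration into the high
	 * nibble and the alternate configuration into the low nibble, so
	 * e.g. 0x21 means main = LNA1, alt = LNA2.
	 */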
	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set main to the stored main conf, alt to the first
		 * quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set main to the stored main conf, alt to the second
		 * quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with the maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
						antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		       ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			 ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi  += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}


int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy; to account for that, we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

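		/*
		 * rs_tstamp holds only the low 32 bits of the TSF at rx
		 * time; splice it into the 64-bit TSF read above and
		 * correct for a 32-bit wraparound that may have occurred
		 * between the two samples.
		 */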
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    unlikely(ath9k_check_auto_sleep(sc)))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}