/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DCF Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>

/**
 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
 *
 * Here we set up parameters for the 12 available TX queues. Note that
 * on the various registers we can usually only map the first 10 of them, so
 * basically we have 10 queues to play with. Each queue has a matching
 * QCU that controls when the queue will get triggered and multiple QCUs
 * can be mapped to a single DCU that controls the various DFS parameters
 * for the various queues. In our setup we have a 1:1 mapping between QCUs
 * and DCUs, allowing us to have different DFS settings for each queue.
 *
 * When a frame goes into a TX queue, the QCU decides when it'll trigger a
 * transmission based on various criteria (such as how much data we have inside
 * its buffer or -if it's a beacon queue- whether it's time to fire up the queue
 * based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
 * (arbitrator) decides the priority of each QCU based on its configuration
 * (e.g. beacons are always transmitted when they leave the DCU, bypassing all
 * other frames from other queues waiting to be transmitted). After a frame
 * leaves the DCU it goes to the PCU for further processing and then to the
 * PHY for the actual transmission.
 */
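
/*
 * Illustrative flow (a sketch only, mirroring ath5k_hw_init_queues() at the
 * bottom of this file): after a chip reset each queue gets its QCU/DCU
 * registers programmed and then the global IFS timings are set, roughly:
 *
 *	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
 *		ath5k_hw_reset_tx_queue(ah, i);
 *
 *	ath5k_hw_set_ifs_intervals(ah, ath5k_hw_get_default_slottime(ah));
 */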


/******************\
* Helper functions *
\******************/

/**
 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 */
u32
ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT ? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
	pending &= AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set. To indicate that the queue has not stopped,
	 * return true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}
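
/*
 * Example (illustrative sketch only): a caller that needs to wait for a
 * queue to drain could poll the function above, e.g.
 *
 *	for (i = 0; i < 100 && ath5k_hw_num_tx_pending(ah, queue); i++)
 *		usleep_range(100, 200);
 *
 * The retry bound and sleep interval are arbitrary values chosen just to
 * show the intended usage, not tuned recommendations.
 */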

/**
 * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 */
void
ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/* For SIMR setup */
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/**
 * ath5k_cw_validate() - Make sure the given cw is valid
 * @cw_req: The contention window value to check
 *
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 */
static u16
ath5k_cw_validate(u16 cw_req)
{
	cw_req = min(cw_req, (u16)1023);

	/* Check if cw_req + 1 is a power of 2 */
	if (is_power_of_2(cw_req + 1))
		return cw_req;

	/* Check if cw_req is a power of 2 */
	if (is_power_of_2(cw_req))
		return cw_req - 1;

	/* If none of the above holds,
	 * round up to the closest power of 2 */
	cw_req = (u16) roundup_pow_of_two(cw_req) - 1;

	return cw_req;
}
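
/*
 * Worked examples for ath5k_cw_validate(), following the code above:
 *
 *	ath5k_cw_validate(15)   -> 15   (15 + 1 is a power of 2)
 *	ath5k_cw_validate(64)   -> 63   (64 itself is a power of 2)
 *	ath5k_cw_validate(100)  -> 127  (rounded up to 2^7 - 1)
 *	ath5k_cw_validate(5000) -> 1023 (clamped to the 1023 maximum)
 */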

/**
 * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 * @queue_info: The &struct ath5k_txq_info to fill
 */
int
ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}

/**
 * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 * @qinfo: The &struct ath5k_txq_info to use
 *
 * Returns 0 on success or -EIO if queue is inactive
 */
int
ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
				const struct ath5k_txq_info *qinfo)
{
	struct ath5k_txq_info *qi;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	qi = &ah->ah_txq[queue];

	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	/* Copy and validate values */
	qi->tqi_type = qinfo->tqi_type;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_flags = qinfo->tqi_flags;
	/*
	 * According to the docs: Although the AIFS field is 8 bit wide,
	 * the maximum supported value is 0xFC. Setting it higher than that
	 * will cause the DCU to hang.
	 */
	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
	qi->tqi_burst_time = qinfo->tqi_burst_time;
	qi->tqi_ready_time = qinfo->tqi_ready_time;

	/* XXX: Is this supported on 5210 ? */
	/* XXX: Is this correct for AR5K_WME_AC_VI,VO ??? */
	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
	     qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}
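
/*
 * Example (illustrative sketch only): queue parameters are meant to be
 * updated with a read-modify-write cycle through the two helpers above,
 * e.g. to change the AIFS of an already active queue:
 *
 *	struct ath5k_txq_info qi;
 *
 *	if (!ath5k_hw_get_tx_queueprops(ah, queue, &qi)) {
 *		qi.tqi_aifs = 2;
 *		ath5k_hw_set_tx_queueprops(ah, queue, &qi);
 *	}
 *
 * The AIFS value of 2 is just an example, not a recommended setting.
 */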

/**
 * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue_type: One of enum ath5k_tx_queue
 * @queue_info: The &struct ath5k_txq_info to use
 *
 * Returns the allocated queue number on success, -EINVAL on invalid arguments
 */
int
ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	/*
	 * Get queue by type
	 */
	/* 5210 only has 2 queues */
	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+;
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}
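
/*
 * Example (illustrative sketch only): allocating a data queue with custom
 * DCF parameters and programming it into the hardware could look like:
 *
 *	struct ath5k_txq_info qi = {
 *		.tqi_aifs = 2,
 *		.tqi_cw_min = 15,
 *		.tqi_cw_max = 1023,
 *	};
 *	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *
 *	if (q >= 0)
 *		ath5k_hw_reset_tx_queue(ah, q);
 *
 * The AIFS/CW values above are arbitrary illustration values.
 */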


/*******************************\
* Single QCU/DCU initialization *
\*******************************/

/**
 * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 *
 * This function is used when initializing a queue, to set
 * retry limits based on ah->ah_retry_* and the chipset used.
 */
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
				  unsigned int queue)
{
	/* Single data queue on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		struct ath5k_txq_info *tq = &ah->ah_txq[queue];

		if (queue > 0)
			return;

		ath5k_hw_reg_write(ah,
			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	/* DCU on AR5211+ */
	} else {
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(ah->ah_retry_long,
				    AR5K_DCU_RETRY_LMT_RTS)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_DCU_RETRY_LMT_STA_RTS)
			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
				      AR5K_DCU_RETRY_LMT_STA_DATA),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
	}
}

/**
 * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 *
 * Sets DCF properties for the given transmit queue on the DCU
 * and configures all queue-specific parameters.
 */
int
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	struct ath5k_txq_info *tq;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	tq = &ah->ah_txq[queue];

	/* Skip if queue inactive or if we are on AR5210
	 * that doesn't have QCU/DCU */
	if ((ah->ah_version == AR5K_AR5210) ||
	    (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
		return 0;

	/*
	 * Set contention window (cw_min/cw_max)
	 * and arbitrated interframe space (aifs)...
	 */
	ath5k_hw_reg_write(ah,
		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
		AR5K_QUEUE_DFS_LOCAL_IFS(queue));

	/*
	 * Set tx retry limits for this queue
	 */
	ath5k_hw_set_tx_retry_limits(ah, queue);


	/*
	 * Set misc registers
	 */

	/* Enable DCU to wait for next fragment from QCU */
	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				AR5K_DCU_MISC_FRAG_WAIT);

	/* On Maui and Spirit use the global seqnum on DCU */
	if (ah->ah_mac_version < AR5K_SREV_AR5211)
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
					AR5K_DCU_MISC_SEQNUM_CTL);

	/* Constant bit rate period */
	if (tq->tqi_cbr_period) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
					AR5K_QCU_CBRCFG_INTVAL) |
					AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
					AR5K_QCU_CBRCFG_ORN_THRES),
					AR5K_QUEUE_CBRCFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_FRSHED_CBR);

		if (tq->tqi_cbr_overflow_limit)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
	}

	/* Ready time interval */
	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
					AR5K_QCU_RDYTIMECFG_INTVAL) |
					AR5K_QCU_RDYTIMECFG_ENABLE,
					AR5K_QUEUE_RDYTIMECFG(queue));

	if (tq->tqi_burst_time) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
					AR5K_DCU_CHAN_TIME_DUR) |
					AR5K_DCU_CHAN_TIME_ENABLE,
					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
	}

	/* Enable/disable Post frame backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
					AR5K_QUEUE_DFS_MISC(queue));

	/* Enable/disable fragmentation burst backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
					AR5K_QUEUE_DFS_MISC(queue));

	/*
	 * Set registers by queue type
	 */
	switch (tq->tqi_type) {
	case AR5K_TX_QUEUE_BEACON:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN_DIS |
				AR5K_QCU_MISC_BCN_ENABLE);

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_ARBLOCK_IGNORE |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);
		break;

	case AR5K_TX_QUEUE_CAB:
		/* XXX: use BCN_SENT_GT, if we can figure out how */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_FRSHED_DBA_GT |
					AR5K_QCU_MISC_CBREXP_DIS |
					AR5K_QCU_MISC_CBREXP_BCN_DIS);

		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
					(AR5K_TUNE_SW_BEACON_RESP -
					AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
					AR5K_QCU_RDYTIMECFG_ENABLE,
					AR5K_QUEUE_RDYTIMECFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
					(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
					AR5K_DCU_MISC_ARBLOCK_CTL_S));
		break;

	case AR5K_TX_QUEUE_UAPSD:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBREXP_DIS);
		break;

	case AR5K_TX_QUEUE_DATA:
	default:
		break;
	}

	/* TODO: Handle frame compression */

	/*
	 * Enable interrupts for this tx queue
	 * in the secondary interrupt mask registers
	 */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

	/* Update secondary interrupt mask registers */

	/* Filter out inactive queues */
	ah->ah_txq_imr_txok &= ah->ah_txq_status;
	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
					AR5K_SIMR0_QCU_TXOK) |
					AR5K_REG_SM(ah->ah_txq_imr_txdesc,
					AR5K_SIMR0_QCU_TXDESC),
					AR5K_SIMR0);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
					AR5K_SIMR1_QCU_TXERR) |
					AR5K_REG_SM(ah->ah_txq_imr_txeol,
					AR5K_SIMR1_QCU_TXEOL),
					AR5K_SIMR1);

	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
				AR5K_REG_SM(ah->ah_txq_imr_txurn,
				AR5K_SIMR2_QCU_TXURN));

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
				AR5K_SIMR3_QCBRORN) |
				AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
				AR5K_SIMR3_QCBRURN),
				AR5K_SIMR3);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
				AR5K_SIMR4_QTRIG), AR5K_SIMR4);

	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
				AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);

	/* If no queue has TXNOFRM enabled, disable the interrupt
	 * by setting AR5K_TXNOFRM to zero */
	if (ah->ah_txq_imr_nofrm == 0)
		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

	/* Set QCU mask for this DCU to save power */
	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);

	return 0;
}


/**************************\
* Global QCU/DCU functions *
\**************************/

/**
 * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Sets the global IFS intervals on DCU (also works on AR5210) for
 * the given slot time and the current bwmode.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	struct ieee80211_rate *rate;
	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);

	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	sifs = ath5k_hw_get_default_sifs(ah);
	sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);

	/* EIFS
	 * Txtime of ack at lowest rate + SIFS + DIFS
	 * (DIFS = SIFS + 2 * Slot time)
	 *
	 * Note: HAL has some predefined values for EIFS
	 * Turbo:   (37 + 2 * 6)
	 * Default: (74 + 2 * 9)
	 * Half:    (149 + 2 * 13)
	 * Quarter: (298 + 2 * 21)
	 *
	 * (74 + 2 * 6) for AR5210 default and turbo!
	 *
	 * According to the formula we have
	 * ack_tx_time = 25 for turbo and
	 * ack_tx_time = 42.5 * clock multiplier
	 * for default/half/quarter.
	 *
	 * This can't be right, 42 is what we would get
	 * from ath5k_hw_get_frame_dur_for_bwmode or
	 * ieee80211_generic_frame_duration for zero frame
	 * length and without SIFS!
	 *
	 * Also we have a different lowest rate for 802.11a
	 */
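
	/*
	 * Worked example (illustrative only, assuming the standard 802.11a
	 * timings of SIFS = 16us and slot time = 9us for the default bwmode):
	 *
	 *	DIFS = SIFS + 2 * slot time = 16 + 2 * 9 = 34us
	 *	EIFS = ack_tx_time + SIFS + DIFS
	 *
	 * Since ack_tx_time as computed below already includes one SIFS,
	 * the code only adds sifs + 2 * slot_time (34us here) on top of it.
	 */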
	if (channel->band == IEEE80211_BAND_5GHZ)
		rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
	else
		rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];

	ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);

	/* ack_tx_time includes an SIFS already */
	eifs = ack_tx_time + sifs + 2 * slot_time;
	eifs_clock = ath5k_hw_htoclock(ah, eifs);

	/* Set IFS settings on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 pifs, pifs_clock, difs, difs_clock;

		/* Set slot time */
		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);

		/* Set EIFS */
		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);

		/* PIFS = Slot time + SIFS */
		pifs = slot_time + sifs;
		pifs_clock = ath5k_hw_htoclock(ah, pifs);
		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);

		/* DIFS = SIFS + 2 * Slot time */
		difs = sifs + 2 * slot_time;
		difs_clock = ath5k_hw_htoclock(ah, difs);

		/* Set SIFS/DIFS */
		ath5k_hw_reg_write(ah, (difs_clock <<
				AR5K_IFS0_DIFS_S) | sifs_clock,
				AR5K_IFS0);

		/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
				(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
				AR5K_IFS1);

		return 0;
	}

	/* Set IFS slot time */
	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

	/* Set EIFS interval */
	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);

	/* Set SIFS interval in usecs */
	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
				sifs);

	/* Set SIFS interval in clock cycles */
	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);

	return 0;
}


/**
 * ath5k_hw_init_queues() - Initialize tx queues
 * @ah: The &struct ath5k_hw
 *
 * Initializes all tx queues based on the information in
 * ah->ah_txq* set by the driver
 */
int
ath5k_hw_init_queues(struct ath5k_hw *ah)
{
	int i, ret;

	/* TODO: HW Compression support for data queues */
	/* TODO: Burst prefetch for data queues */

	/*
	 * Reset queues and start beacon timers at the end of the reset routine
	 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
	 * Note: If we want we can assign multiple qcus on one dcu.
	 */
	if (ah->ah_version != AR5K_AR5210)
		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
			ret = ath5k_hw_reset_tx_queue(ah, i);
			if (ret) {
				ATH5K_ERR(ah,
					"failed to reset TX queue #%d\n", i);
				return ret;
			}
		}
	else
		/* No QCU/DCU on AR5210, just set tx
		 * retry limits. We set IFS parameters
		 * in ath5k_hw_set_ifs_intervals */
		ath5k_hw_set_tx_retry_limits(ah, 0);

	/* Set the turbo flag when operating on 40MHz */
	if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
		AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);

	/* If we didn't set IFS timings through
	 * ath5k_hw_set_coverage_class, make sure
	 * we set them here */
	if (!ah->ah_coverage_class) {
		unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
		ath5k_hw_set_ifs_intervals(ah, slot_time);
	}

	return 0;
}