/*
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00
        Abstract: rt2x00 queue datastructures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 still makes sense since it is large enough to contain the maximum
 * fragment size allowed by the IEEE 802.11 spec.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE         2432
#define MGMT_FRAME_SIZE         256
#define AGGREGATION_SIZE        3840
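
/*
 * Illustrative arithmetic (sketch): DATA_FRAME_SIZE satisfies the PCI
 * alignment demand above because 2432 = 19 * 128; the check below, placed
 * inside any function body, would confirm this at compile time.
 *
 *      BUILD_BUG_ON(DATA_FRAME_SIZE % 128);
 */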

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_VO: AC VO queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
        QID_AC_VO = 0,
        QID_AC_VI = 1,
        QID_AC_BE = 2,
        QID_AC_BK = 3,
        QID_HCCA = 4,
        QID_MGMT = 13,
        QID_RX = 14,
        QID_OTHER = 15,
        QID_BEACON,
        QID_ATIM,
};

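/*
 * Example (illustrative sketch): for data frames the four AC identifiers
 * line up with the mac80211 queue mapping, so a driver's TX path can use
 * the skb queue mapping directly as the qid. 'skb' is a hypothetical
 * frame handed down by mac80211.
 *
 *      enum data_queue_qid qid = skb_get_queue_mapping(skb);
 */
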
/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *      mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *      don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *      skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
        SKBDESC_DMA_MAPPED_RX = 1 << 0,
        SKBDESC_DMA_MAPPED_TX = 1 << 1,
        SKBDESC_IV_STRIPPED = 1 << 2,
        SKBDESC_NOT_MAC80211 = 1 << 3,
        SKBDESC_DESC_IN_SKB = 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, which means that
 * it must not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *      Note that this pointer could point to something outside
 *      of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
        u8 flags;

        u8 desc_len;
        u8 tx_rate_idx;
        u8 tx_rate_flags;

        void *desc;

        __le32 iv[2];

        dma_addr_t skb_dma;

        struct queue_entry *entry;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
                     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
        return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}

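/*
 * Example (illustrative sketch): a driver's TX path can record where its
 * hardware descriptor lives relative to the frame before queueing it.
 * 'skb' and 'entry' are hypothetical local variables of the caller.
 *
 *      struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 *
 *      skbdesc->flags |= SKBDESC_DESC_IN_SKB;
 *      skbdesc->desc = skb->data;
 *      skbdesc->desc_len = entry->queue->desc_size;
 *      skbdesc->entry = entry;
 */
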
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the PLCP value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the MCS value.
 * @RXDONE_MY_BSS: Frame originates from the device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to a 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
        RXDONE_SIGNAL_PLCP = BIT(0),
        RXDONE_SIGNAL_BITRATE = BIT(1),
        RXDONE_SIGNAL_MCS = BIT(2),
        RXDONE_MY_BSS = BIT(3),
        RXDONE_CRYPTO_IV = BIT(4),
        RXDONE_CRYPTO_ICV = BIT(5),
        RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
        ( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )

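/*
 * Example (illustrative sketch): interpreting the signal field of a
 * received frame. 'rxdesc' is a hypothetical &struct rxdone_entry_desc
 * filled in by a driver's rxdone handler.
 *
 *      switch (rxdesc->dev_flags & RXDONE_SIGNAL_MASK) {
 *      case RXDONE_SIGNAL_PLCP:
 *              // rxdesc->signal holds the raw PLCP value
 *              break;
 *      case RXDONE_SIGNAL_BITRATE:
 *              // rxdesc->signal holds the bitrate value
 *              break;
 *      case RXDONE_SIGNAL_MCS:
 *              // rxdesc->signal holds the MCS index
 *              break;
 *      }
 */
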
/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: mac80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
        u64 timestamp;
        int signal;
        int rssi;
        int size;
        int flags;
        int dev_flags;
        u16 rate_mode;
        u8 cipher;
        u8 cipher_status;

        __le32 iv[2];
        __le32 icv;
};

/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries
 * @TXDONE_FAILURE: Frame was not successfully sent
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *      frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
        TXDONE_UNKNOWN,
        TXDONE_SUCCESS,
        TXDONE_FALLBACK,
        TXDONE_FAILURE,
        TXDONE_EXCESSIVE_RETRY,
        TXDONE_AMPDU,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
        unsigned long flags;
        int retry;
};

/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires a sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first fragment.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40 MHz bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
        ENTRY_TXD_RTS_FRAME,
        ENTRY_TXD_CTS_FRAME,
        ENTRY_TXD_GENERATE_SEQ,
        ENTRY_TXD_FIRST_FRAGMENT,
        ENTRY_TXD_MORE_FRAG,
        ENTRY_TXD_REQ_TIMESTAMP,
        ENTRY_TXD_BURST,
        ENTRY_TXD_ACK,
        ENTRY_TXD_RETRY_MODE,
        ENTRY_TXD_ENCRYPT,
        ENTRY_TXD_ENCRYPT_PAIRWISE,
        ENTRY_TXD_ENCRYPT_IV,
        ENTRY_TXD_ENCRYPT_MMIC,
        ENTRY_TXD_HT_AMPDU,
        ENTRY_TXD_HT_BW_40,
        ENTRY_TXD_HT_SHORT_GI,
        ENTRY_TXD_HT_MIMO_PS,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: Use Space Time Block Coding (only available for MCS rates < 8).
 * @ba_size: Size of the recipient's RX reorder buffer - 1.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
        unsigned long flags;

        u16 length;
        u16 header_length;

        union {
                struct {
                        u16 length_high;
                        u16 length_low;
                        u16 signal;
                        u16 service;
                        enum ifs ifs;
                } plcp;

                struct {
                        u16 mcs;
                        u8 stbc;
                        u8 ba_size;
                        u8 mpdu_density;
                        enum txop txop;
                        int wcid;
                } ht;
        } u;

        enum rate_modulation rate_mode;

        short retry_limit;

        enum cipher cipher;
        u16 key_idx;
        u16 iv_offset;
        u16 iv_len;
};

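/*
 * Example (illustrative sketch): when building the hardware descriptor a
 * driver selects the legacy (PLCP) or HT half of the union based on the
 * rate mode; the rate-mode names below are assumed to come from rt2x00.h.
 *
 *      if (txdesc->rate_mode == RATE_MODE_HT_MIX ||
 *          txdesc->rate_mode == RATE_MODE_HT_GREENFIELD)
 *              mcs = txdesc->u.ht.mcs;
 *      else
 *              signal = txdesc->u.plcp.signal;
 */
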
/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *      As long as this bit is set, this entry may only be touched
 *      through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *      transfer (either TX or RX depending on the queue). The entry should
 *      only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *      for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *      while transferring the data to the hardware. No TX status report will
 *      be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
 *      returned. It is now waiting for the status reporting before the
 *      entry can be reused again.
 */
enum queue_entry_flags {
        ENTRY_BCN_ASSIGNED,
        ENTRY_OWNER_DEVICE_DATA,
        ENTRY_DATA_PENDING,
        ENTRY_DATA_IO_FAILED,
        ENTRY_DATA_STATUS_PENDING,
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @last_action: Timestamp of last change.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *      or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *      points to data specific to a particular driver and queue type.
 */
struct queue_entry {
        unsigned long flags;
        unsigned long last_action;

        struct data_queue *queue;

        struct sk_buff *skb;

        unsigned int entry_idx;

        void *priv_data;
};

/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *      owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 *      transferred to the hardware.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *      the hardware and for which we need to run the txdone handler. If this
 *      entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *      of the index array.
 */
enum queue_index {
        Q_INDEX,
        Q_INDEX_DMA_DONE,
        Q_INDEX_DONE,
        Q_INDEX_MAX,
};

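/*
 * Illustrative sketch: on a TX queue the three indexes chase each other
 * around the ring, so in ring order (modulo queue->limit):
 *
 *      Q_INDEX_DONE <= Q_INDEX_DMA_DONE <= Q_INDEX
 *
 * Entries between Q_INDEX_DONE and Q_INDEX are currently owned by the
 * hardware.
 */
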
/**
 * enum data_queue_flags: Status flags for data queues
 *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
 *      device might be DMA'ing skbuffers. TX queues will accept skbuffers to
 *      be transmitted and beacon queues will start beaconing the configured
 *      beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *      When this bit is set, the queue has been stopped in mac80211,
 *      preventing new frames from being enqueued. However, a few frames
 *      might still appear shortly after the pause.
 */
enum data_queue_flags {
        QUEUE_STARTED,
        QUEUE_PAUSED,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to the main &struct rt2x00_dev to which this queue belongs.
 * @entries: Base address of the &struct queue_entry which are
 *      part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
 * @status_lock: The mutex for protecting the start/stop/flush
 *      handling on this queue.
 * @tx_lock: Spinlock to serialize tx operations on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever the entries in
 *      @index need to be changed this lock should be grabbed to prevent
 *      index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before the queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *      use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
        struct rt2x00_dev *rt2x00dev;
        struct queue_entry *entries;

        enum data_queue_qid qid;
        unsigned long flags;

        struct mutex status_lock;
        spinlock_t tx_lock;
        spinlock_t index_lock;

        unsigned int count;
        unsigned short limit;
        unsigned short threshold;
        unsigned short length;
        unsigned short index[Q_INDEX_MAX];

        unsigned short txop;
        unsigned short aifs;
        unsigned short cw_min;
        unsigned short cw_max;

        unsigned short data_size;
        unsigned short desc_size;

        unsigned short usb_endpoint;
        unsigned short usb_maxpacket;
};

/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
        unsigned short entry_num;
        unsigned short data_size;
        unsigned short desc_size;
        unsigned short priv_size;
};

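/*
 * Example (illustrative sketch): a driver describes its RX queue layout
 * with a &struct data_queue_desc; the values and the private data type
 * below are hypothetical.
 *
 *      static const struct data_queue_desc example_queue_rx = {
 *              .entry_num      = 128,
 *              .data_size      = AGGREGATION_SIZE,
 *              .desc_size      = 32,
 *              .priv_size      = sizeof(struct queue_entry_priv_example),
 *      };
 */
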
/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
        &(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
        &(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to get the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
        &(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end) \
        for ((__entry) = (__start); \
             prefetch(queue_next(__entry)), (__entry) != (__end); \
             (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
        queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
        queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
        queue_loop(__entry, (__dev)->tx, queue_end(__dev))

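/*
 * Example (illustrative sketch): walking every regular TX queue of a
 * device with a hypothetical per-queue helper.
 *
 *      struct data_queue *queue;
 *
 *      tx_queue_for_each(rt2x00dev, queue)
 *              example_flush_queue(queue);
 */
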
/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
 * @data: Data to pass to the callback function
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start pointer
 * and will walk through the queue until it reaches the @end pointer.
 *
 * If fn returns true for an entry rt2x00queue_for_each_entry will stop
 * processing and return true as well.
 */
bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
                                void *data,
                                bool (*fn)(struct queue_entry *entry,
                                           void *data));

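/*
 * Example (illustrative sketch): pushing out all frames that are still
 * pending between two queue indexes, using a hypothetical callback and a
 * hypothetical example_kick_tx() helper.
 *
 *      static bool example_kick_entry(struct queue_entry *entry, void *data)
 *      {
 *              if (test_bit(ENTRY_DATA_PENDING, &entry->flags))
 *                      example_kick_tx(entry);
 *              return false;
 *      }
 *
 *      rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *                                 NULL, example_kick_entry);
 */
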
/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
        return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
        return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in the queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
        return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
        return rt2x00queue_available(queue) < queue->threshold;
}

/**
 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
 * @entry: Queue entry to check.
 */
static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
{
        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return false;
        return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
}

/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
{
        *value = desc[word];
}

/**
 * rt2x00_desc_read - Read a word from the hardware descriptor; this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
{
        __le32 tmp;
        _rt2x00_desc_read(desc, word, &tmp);
        *value = le32_to_cpu(tmp);
}

/**
 * _rt2x00_desc_write - Write a word to the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index to which the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
        desc[word] = value;
}

/**
 * rt2x00_desc_write - Write a word to the hardware descriptor; this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to which the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
        _rt2x00_desc_write(desc, word, cpu_to_le32(value));
}
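
/*
 * Example (illustrative sketch): a read-modify-write of descriptor word 0
 * using the register-field helpers from rt2x00reg.h; 'TXD_W0_VALID' is a
 * hypothetical field definition.
 *
 *      u32 word;
 *
 *      rt2x00_desc_read(txd, 0, &word);
 *      rt2x00_set_field32(&word, TXD_W0_VALID, 1);
 *      rt2x00_desc_write(txd, 0, word);
 */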

#endif /* RT2X00QUEUE_H */