/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-shared.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_tx_queue;
struct iwl_queue;
struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

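/*
 * Illustrative sketch only (not part of the driver): how one of these
 * buffers could be taken from rx_used, backed by a freshly DMA-mapped
 * page and moved to rx_free. The device pointer "dev" is an assumption
 * made for the example:
 *
 *	struct iwl_rx_mem_buffer *rxb =
 *		list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 *				 list);
 *
 *	rxb->page = alloc_pages(GFP_KERNEL, 0);
 *	if (rxb->page) {
 *		rxb->page_dma = dma_map_page(dev, rxb->page, 0, PAGE_SIZE,
 *					     DMA_FROM_DEVICE);
 *		if (dma_mapping_error(dev, rxb->page_dma)) {
 *			__free_pages(rxb->page, 0);
 *			rxb->page = NULL;
 *		} else {
 *			list_move(&rxb->list, &rxq->rx_free);
 *			rxq->free_count++;
 *		}
 *	}
 */
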
/**
 * struct isr_statistics - interrupt statistics
 * @hw: HW error interrupts
 * @sw: uCode (software) error interrupts
 * @err_code: last reported uCode error code
 * @sch: scheduler interrupts
 * @alive: alive notifications
 * @rfkill: RF kill switch toggles
 * @ctkill: critical temperature interrupts
 * @wakeup: wakeup interrupts
 * @rx: Rx interrupts
 * @tx: Tx interrupts
 * @unhandled: interrupts that matched no known cause
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: pool of iwl_rx_mem_buffers backing rx_free, rx_used and queue
 * @queue: maps each RBD slot to the iwl_rx_mem_buffer that owns it
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write index actually handed to the HW
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

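/*
 * Illustrative sketch only: an iwl_dma_ptr would typically be backed by
 * a coherent DMA allocation. The device pointer and the helper name are
 * assumptions made for the example:
 *
 *	static int example_alloc_dma_ptr(struct device *dev,
 *					 struct iwl_dma_ptr *ptr, size_t size)
 *	{
 *		ptr->addr = dma_alloc_coherent(dev, size, &ptr->dma,
 *					       GFP_KERNEL);
 *		if (!ptr->addr)
 *			return -ENOMEM;
 *		ptr->size = size;
 *		return 0;
 *	}
 */
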
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}

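/*
 * Worked example (illustrative): with n_bd = 256, the mask is 0xff, so
 * iwl_queue_inc_wrap(255, 256) == 0 and iwl_queue_dec_wrap(0, 256) == 255.
 * The bitwise AND implements the wrap correctly only because n_bd is a
 * power of 2.
 */
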
/*
 * This queue number is required for proper operation
 * because the uCode will stop/start the scheduler as
 * required.
 */
#define IWL_IPAN_MCAST_QUEUE		8

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	u32 flags;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @skbs in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int n_bd;		/* number of BDs in this queue */
	int write_ptr;		/* first empty entry (index); host write pointer */
	int read_ptr;		/* last used entry (index); host read pointer */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;		/* safe queue window */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space is more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space is less than this */
};

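/*
 * Worked example (illustrative): the command queue has n_bd = 256 but
 * n_window = TFD_CMD_SLOTS = 32, so a HW index is reduced to a SW slot
 * by masking with (n_window - 1): HW index 100 maps to SW entry
 * 100 & 31 = 4 (see get_cmd_index() below).
 */
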
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @skbs: array of per-TFD socket buffer pointers
 * @lock: queue lock
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: indicates the queue is active
 * @swq_id: software queue id (see iwl_set_swq_id() below)
 * @sta_id: valid if sched_retry is set
 * @tid: valid if sched_retry is set
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and the required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how much to
 * copy, even for HCMDs it must be big enough to fit the
 * DRAM scratch from the TX cmd, at least 16 bytes.
 */
#define IWL_HCMD_MIN_COPY_SIZE	16

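/*
 * Illustrative sketch only: when building a host command, the first TB
 * would always receive a copy of at least IWL_HCMD_MIN_COPY_SIZE bytes
 * of the command, whatever the mapping of the rest looks like. The
 * names first_tb_buf, out_cmd and copy_size are hypothetical here:
 *
 *	memcpy(first_tb_buf, &out_cmd->hdr,
 *	       max_t(int, copy_size, IWL_HCMD_MIN_COPY_SIZE));
 */
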
struct iwl_tx_queue {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct sk_buff **skbs;
	spinlock_t lock;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;

	u16 sta_id;
	u16 tid;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @irq: the irq number for the device
 * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @ac_to_fifo: to which FIFO a specific AC is mapped
 * @ac_to_queue: to which tx queue a specific AC is mapped
 * @mcast_queue: the multicast queue for each RXON context
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: bitmask of which queues are active
 * @queue_stopped: tracks which queues are stopped
 * @queue_stop_count: tracks the stop count for each SW queue (AC)
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied
 * @ucode_write_waitq: wait queue for uCode load
 * @status: transport specific status flags
 * @cmd_queue: command queue number
 */
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	bool irq_requested;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	unsigned int irq;
	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];
	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];

	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	atomic_t queue_stop_count[4];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	unsigned long status;
	u8 cmd_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

/*****************************************************
* RX
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q);

/*****************************************************
* ICT
******************************************************/
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
irqreturn_t iwl_isr_ict(int irq, void *data);

/*****************************************************
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
			      struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry);
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx,
				 int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);

/*****************************************************
* Error handling
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			   char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}

/*
 * We have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}

static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
{
	return txq->swq_id & 0x3;
}

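/*
 * Worked example (illustrative): for hwq 10 and ac 2, iwl_set_swq_id()
 * stores (10 << 2) | 2 = 0x2a; iwl_get_queue_ac() then recovers
 * 0x2a & 0x3 = 2, and (0x2a >> 2) & 0x1f recovers 10, as done in
 * iwl_wake_queue() and iwl_stop_queue() below.
 */
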
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
			iwl_op_mode_queue_not_full(trans->op_mode, ac);
			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
					    hwq, ac);
		} else {
			IWL_DEBUG_TX_QUEUES(trans,
				"Don't wake hwq %d ac %d stop count %d",
				hwq, ac,
				atomic_read(&trans_pcie->queue_stop_count[ac]));
		}
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
			iwl_op_mode_queue_full(trans->op_mode, ac);
			IWL_DEBUG_TX_QUEUES(trans,
				"Stop hwq %d ac %d stop count %d",
				hwq, ac,
				atomic_read(&trans_pcie->queue_stop_count[ac]));
		} else {
			IWL_DEBUG_TX_QUEUES(trans,
				"Don't stop hwq %d ac %d stop count %d",
				hwq, ac,
				atomic_read(&trans_pcie->queue_stop_count[ac]));
		}
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
				    hwq);
	}
}

static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
					int txq_id)
{
	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
					  int txq_id)
{
	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

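/*
 * Worked example (illustrative): with n_bd = 256, read_ptr = 250 and
 * write_ptr = 5 (i.e. the queue has wrapped), entries 250..255 and
 * 0..4 are in use: iwl_queue_used(q, 252) is true while
 * iwl_queue_used(q, 10) is false.
 */
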
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN	4
#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN	5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX		5
#define IWL_TX_FIFO_UNUSED	-1

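/*
 * Illustrative sketch only: a per-context ac_to_fifo table (see struct
 * iwl_trans_pcie above) could map the four ACs to these FIFOs. The
 * array name and the AC ordering are assumptions made for the example:
 *
 *	static const u8 example_ac_to_fifo[] = {
 *		IWL_TX_FIFO_VO,
 *		IWL_TX_FIFO_VI,
 *		IWL_TX_FIFO_BE,
 *		IWL_TX_FIFO_BK,
 *	};
 */
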
/* AUX (TX during scan dwell) queue */
#define IWL_AUX_QUEUE		10

#endif /* __iwl_trans_int_pcie_h__ */