1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/pci-aspm.h>
37 #include <linux/slab.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/delay.h>
40 #include <linux/sched.h>
41 #include <linux/skbuff.h>
42 #include <linux/netdevice.h>
43 #include <linux/wireless.h>
44 #include <linux/firmware.h>
45 #include <linux/etherdevice.h>
46 #include <linux/if_arp.h>
47
48 #include <net/mac80211.h>
49
50 #include <asm/div64.h>
51
52 #define DRV_NAME "iwl4965"
53
54 #include "iwl-eeprom.h"
55 #include "iwl-dev.h"
56 #include "iwl-core.h"
57 #include "iwl-io.h"
58 #include "iwl-helpers.h"
59 #include "iwl-sta.h"
60 #include "iwl-4965-calib.h"
61 #include "iwl-4965.h"
62 #include "iwl-4965-led.h"
63
64
65 /******************************************************************************
66 *
67 * module boilerplate
68 *
69 ******************************************************************************/
70
71 /*
72 * module name, copyright, version, etc.
73 */
74 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77 #define VD "d"
78 #else
79 #define VD
80 #endif
81
82 #define DRV_VERSION IWLWIFI_VERSION VD
83
84
85 MODULE_DESCRIPTION(DRV_DESCRIPTION);
86 MODULE_VERSION(DRV_VERSION);
87 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88 MODULE_LICENSE("GPL");
89 MODULE_ALIAS("iwl4965");
90
91 void iwl4965_update_chain_flags(struct iwl_priv *priv)
92 {
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102 }
103
104 static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105 {
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123 }
124
125 static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126 {
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143 }
144
145 static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146 {
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149 }
150
151 static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154 {
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166 }
167
168 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169 static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172 {
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
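/*
 * Each element in the variable part of the beacon is a TLV:
 * { u8 id; u8 len; u8 data[len]; }, so the scan below advances by
 * beacon[tim_idx + 1] + 2 bytes per element until it either finds
 * WLAN_EID_TIM or runs past frame_size.
 */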
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193 }
194
195 static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197 {
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248 }
249
250 int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251 {
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276 }
277
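/*
 * The helpers below pack and unpack transmit buffer (TB) entries inside a
 * TFD.  Each TB holds a 36-bit DMA address and a 12-bit length: tb->lo
 * carries address bits 31:0, while tb->hi_n_len carries address bits 35:32
 * in its low nibble and the length in its upper 12 bits.  For example
 * (values chosen purely for illustration), addr = 0x9_1234_5678 and
 * len = 0x100 are stored as lo = 0x12345678, hi_n_len = 0x1009.
 */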
278 static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279 {
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288 }
289
290 static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291 {
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295 }
296
297 static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299 {
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310 }
311
312 static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313 {
314 return tfd->num_tbs & 0x1f;
315 }
316
317 /**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325 void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326 {
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
341 /* @todo issue fatal error, this is quite a serious situation */
342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370 }
371
372 int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376 {
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
390 /* Each TFD can point to a maximum of 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405 }
406
407 /*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
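/*
 * Note on the register write below: the base address is programmed into
 * FH_MEM_CBBC_QUEUE(txq_id) shifted right by 8, i.e. in units of 256
 * bytes, which implies the TFD circular buffer must be 256-byte aligned.
 */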
414 int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416 {
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424 }
425
426 /******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431 static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433 {
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465 }
466
467 /**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477 static void iwl4965_bg_statistics_periodic(unsigned long data)
478 {
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
484 /* don't send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489 }
490
491
492 static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
493 u32 start_idx, u32 num_events,
494 u32 mode)
495 {
496 u32 i;
497 u32 ptr; /* SRAM byte address of log data */
498 u32 ev, time, data; /* event log data */
499 unsigned long reg_flags;
500
501 if (mode == 0)
502 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
503 else
504 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
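/*
 * Layout implied by the arithmetic above and by the header reads in
 * iwl4965_continuous_event_trace(): the log starts with a four-u32 header
 * (capacity, mode, num_wraps, next_entry), followed by entries of two u32s
 * (event id, data) in mode 0 or three u32s (event id, timestamp, data) in
 * mode 1.
 */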
505
506 /* Make sure device is powered up for SRAM reads */
507 spin_lock_irqsave(&priv->reg_lock, reg_flags);
508 if (iwl_grab_nic_access(priv)) {
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510 return;
511 }
512
513 /* Set starting address; reads will auto-increment */
514 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
515 rmb();
516
517 /*
518 * "time" is actually "data" for mode 0 (no timestamp).
519 * place event id # at far right for easier visual parsing.
520 */
521 for (i = 0; i < num_events; i++) {
522 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
523 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
524 if (mode == 0) {
525 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
526 0, time, ev);
527 } else {
528 data = _iwl_legacy_read_direct32(priv,
529 HBUS_TARG_MEM_RDAT);
530 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
531 time, data, ev);
532 }
533 }
534 /* Allow device to power down */
535 iwl_release_nic_access(priv);
536 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
537 }
538
539 static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
540 {
541 u32 capacity; /* event log capacity in # entries */
542 u32 base; /* SRAM byte address of event log header */
543 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
544 u32 num_wraps; /* # times uCode wrapped to top of log */
545 u32 next_entry; /* index of next entry to be written by uCode */
546
547 if (priv->ucode_type == UCODE_INIT)
548 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
549 else
550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
551 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
552 capacity = iwl_legacy_read_targ_mem(priv, base);
553 num_wraps = iwl_legacy_read_targ_mem(priv,
554 base + (2 * sizeof(u32)));
555 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
556 next_entry = iwl_legacy_read_targ_mem(priv,
557 base + (3 * sizeof(u32)));
558 } else
559 return;
560
561 if (num_wraps == priv->event_log.num_wraps) {
562 iwl4965_print_cont_event_trace(priv,
563 base, priv->event_log.next_entry,
564 next_entry - priv->event_log.next_entry,
565 mode);
566 priv->event_log.non_wraps_count++;
567 } else {
568 if ((num_wraps - priv->event_log.num_wraps) > 1)
569 priv->event_log.wraps_more_count++;
570 else
571 priv->event_log.wraps_once_count++;
572 trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
573 num_wraps - priv->event_log.num_wraps,
574 next_entry, priv->event_log.next_entry);
575 if (next_entry < priv->event_log.next_entry) {
576 iwl4965_print_cont_event_trace(priv, base,
577 priv->event_log.next_entry,
578 capacity - priv->event_log.next_entry,
579 mode);
580
581 iwl4965_print_cont_event_trace(priv, base, 0,
582 next_entry, mode);
583 } else {
584 iwl4965_print_cont_event_trace(priv, base,
585 next_entry, capacity - next_entry,
586 mode);
587
588 iwl4965_print_cont_event_trace(priv, base, 0,
589 next_entry, mode);
590 }
591 }
592 priv->event_log.num_wraps = num_wraps;
593 priv->event_log.next_entry = next_entry;
594 }
595
596 /**
597 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
598 *
599 * The timer is continually reset to execute every
600 * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
601 * This function performs the continuous uCode event logging operation,
602 * if enabled.
603 */
604 static void iwl4965_bg_ucode_trace(unsigned long data)
605 {
606 struct iwl_priv *priv = (struct iwl_priv *)data;
607
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return;
610
611 if (priv->event_log.ucode_trace) {
612 iwl4965_continuous_event_trace(priv);
613 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
614 mod_timer(&priv->ucode_trace,
615 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
616 }
617 }
618
619 static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
620 struct iwl_rx_mem_buffer *rxb)
621 {
622 struct iwl_rx_packet *pkt = rxb_addr(rxb);
623 struct iwl4965_beacon_notif *beacon =
624 (struct iwl4965_beacon_notif *)pkt->u.raw;
625 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
626 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
627
628 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
629 "tsf %d %d rate %d\n",
630 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
631 beacon->beacon_notify_hdr.failure_frame,
632 le32_to_cpu(beacon->ibss_mgr_status),
633 le32_to_cpu(beacon->high_tsf),
634 le32_to_cpu(beacon->low_tsf), rate);
635 #endif
636
637 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
638 }
639
640 static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
641 {
642 unsigned long flags;
643
644 IWL_DEBUG_POWER(priv, "Stop all queues\n");
645
646 if (priv->mac80211_registered)
647 ieee80211_stop_queues(priv->hw);
648
649 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
650 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
651 iwl_read32(priv, CSR_UCODE_DRV_GP1);
652
653 spin_lock_irqsave(&priv->reg_lock, flags);
654 if (!iwl_grab_nic_access(priv))
655 iwl_release_nic_access(priv);
656 spin_unlock_irqrestore(&priv->reg_lock, flags);
657 }
658
659 /* Handle notification from uCode that card's power state is changing
660 * due to software, hardware, or critical temperature RFKILL */
661 static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
662 struct iwl_rx_mem_buffer *rxb)
663 {
664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
665 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
666 unsigned long status = priv->status;
667
668 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
669 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
670 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
671 (flags & CT_CARD_DISABLED) ?
672 "Reached" : "Not reached");
673
674 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
675 CT_CARD_DISABLED)) {
676
677 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
678 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
679
680 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
681 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
682
683 if (!(flags & RXON_CARD_DISABLED)) {
684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
686 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
687 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
688 }
689 }
690
691 if (flags & CT_CARD_DISABLED)
692 iwl4965_perform_ct_kill_task(priv);
693
694 if (flags & HW_CARD_DISABLED)
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696 else
697 clear_bit(STATUS_RF_KILL_HW, &priv->status);
698
699 if (!(flags & RXON_CARD_DISABLED))
700 iwl_legacy_scan_cancel(priv);
701
702 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
703 test_bit(STATUS_RF_KILL_HW, &priv->status)))
704 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
705 test_bit(STATUS_RF_KILL_HW, &priv->status));
706 else
707 wake_up_interruptible(&priv->wait_command_queue);
708 }
709
710 /**
711 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
712 *
713 * Setup the RX handlers for each of the reply types sent from the uCode
714 * to the host.
715 *
716 * This function chains into the hardware specific files for them to setup
717 * any hardware specific handlers as well.
718 */
719 static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
720 {
721 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
722 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
723 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
724 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
725 iwl_legacy_rx_spectrum_measure_notif;
726 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
727 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
728 iwl_legacy_rx_pm_debug_statistics_notif;
729 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
730
731 /*
732 * The same handler is used for both the REPLY to a discrete
733 * statistics request from the host as well as for the periodic
734 * statistics notifications (after received beacons) from the uCode.
735 */
736 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
737 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
738
739 iwl_legacy_setup_rx_scan_handlers(priv);
740
741 /* status change handler */
742 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
743 iwl4965_rx_card_state_notif;
744
745 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
746 iwl4965_rx_missed_beacon_notif;
747 /* Rx handlers */
748 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
749 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
750 /* block ack */
751 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
752 /* Set up hardware specific Rx handlers */
753 priv->cfg->ops->lib->rx_handler_setup(priv);
754 }
755
756 /**
757 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
758 *
759 * Uses the priv->rx_handlers callback function array to invoke
760 * the appropriate handlers, including command responses,
761 * frame-received notifications, and other notifications.
762 */
763 void iwl4965_rx_handle(struct iwl_priv *priv)
764 {
765 struct iwl_rx_mem_buffer *rxb;
766 struct iwl_rx_packet *pkt;
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 u32 r, i;
769 int reclaim;
770 unsigned long flags;
771 u8 fill_rx = 0;
772 u32 count = 8;
773 int total_empty;
774
775 /* uCode's read index (stored in shared DRAM) indicates the last Rx
776 * buffer that the driver may process (last buffer filled by ucode). */
777 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
778 i = rxq->read;
779
780 /* Rx interrupt, but nothing sent from uCode */
781 if (i == r)
782 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
783
784 /* calculate total frames need to be restock after handling RX */
785 total_empty = r - rxq->write_actual;
786 if (total_empty < 0)
787 total_empty += RX_QUEUE_SIZE;
788
789 if (total_empty > (RX_QUEUE_SIZE / 2))
790 fill_rx = 1;
791
792 while (i != r) {
793 int len;
794
795 rxb = rxq->queue[i];
796
797 /* If an RXB doesn't have a Rx queue slot associated with it,
798 * then a bug has been introduced in the queue refilling
799 * routines -- catch it here */
800 BUG_ON(rxb == NULL);
801
802 rxq->queue[i] = NULL;
803
804 pci_unmap_page(priv->pci_dev, rxb->page_dma,
805 PAGE_SIZE << priv->hw_params.rx_page_order,
806 PCI_DMA_FROMDEVICE);
807 pkt = rxb_addr(rxb);
808
809 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
810 len += sizeof(u32); /* account for status word */
811 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
812
813 /* Reclaim a command buffer only if this packet is a response
814 * to a (driver-originated) command.
815 * If the packet (e.g. Rx frame) originated from uCode,
816 * there is no command buffer to reclaim.
817 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
818 * but apparently a few don't get set; catch them here. */
819 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
820 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
821 (pkt->hdr.cmd != REPLY_RX) &&
822 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
823 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
824 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
825 (pkt->hdr.cmd != REPLY_TX);
826
827 /* Based on type of command response or notification,
828 * handle those that need handling via function in
829 * rx_handlers table. See iwl4965_setup_rx_handlers() */
830 if (priv->rx_handlers[pkt->hdr.cmd]) {
831 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
832 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
833 pkt->hdr.cmd);
834 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
835 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
836 } else {
837 /* No handling needed */
838 IWL_DEBUG_RX(priv,
839 "r %d i %d No handler needed for %s, 0x%02x\n",
840 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
841 pkt->hdr.cmd);
842 }
843
844 /*
845 * XXX: After here, we should always check rxb->page
846 * against NULL before touching it or its virtual
847 * memory (pkt), because some rx_handler might have
848 * already taken or freed the pages.
849 */
850
851 if (reclaim) {
852 /* Invoke any callbacks, transfer the buffer to caller,
853 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
854 * as we reclaim the driver command queue */
855 if (rxb->page)
856 iwl_legacy_tx_cmd_complete(priv, rxb);
857 else
858 IWL_WARN(priv, "Claim null rxb?\n");
859 }
860
861 /* Reuse the page if possible. For notification packets and
862 * SKBs that fail to Rx correctly, add them back into the
863 * rx_free list for reuse later. */
864 spin_lock_irqsave(&rxq->lock, flags);
865 if (rxb->page != NULL) {
866 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
867 0, PAGE_SIZE << priv->hw_params.rx_page_order,
868 PCI_DMA_FROMDEVICE);
869 list_add_tail(&rxb->list, &rxq->rx_free);
870 rxq->free_count++;
871 } else
872 list_add_tail(&rxb->list, &rxq->rx_used);
873
874 spin_unlock_irqrestore(&rxq->lock, flags);
875
876 i = (i + 1) & RX_QUEUE_MASK;
877 /* If there are a lot of unused frames,
878 * restock the Rx queue so ucode won't assert. */
879 if (fill_rx) {
880 count++;
881 if (count >= 8) {
882 rxq->read = i;
883 iwl4965_rx_replenish_now(priv);
884 count = 0;
885 }
886 }
887 }
888
889 /* Backtrack one entry */
890 rxq->read = i;
891 if (fill_rx)
892 iwl4965_rx_replenish_now(priv);
893 else
894 iwl4965_rx_queue_restock(priv);
895 }
896
897 /* call this function to flush any scheduled tasklet */
898 static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
899 {
900 /* wait to make sure we flush any pending tasklet */
901 synchronize_irq(priv->pci_dev->irq);
902 tasklet_kill(&priv->irq_tasklet);
903 }
904
905 static void iwl4965_irq_tasklet(struct iwl_priv *priv)
906 {
907 u32 inta, handled = 0;
908 u32 inta_fh;
909 unsigned long flags;
910 u32 i;
911 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
912 u32 inta_mask;
913 #endif
914
915 spin_lock_irqsave(&priv->lock, flags);
916
917 /* Ack/clear/reset pending uCode interrupts.
918 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
919 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
920 inta = iwl_read32(priv, CSR_INT);
921 iwl_write32(priv, CSR_INT, inta);
922
923 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
924 * Any new interrupts that happen after this, either while we're
925 * in this tasklet, or later, will show up in next ISR/tasklet. */
926 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
927 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
928
929 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
930 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
931 /* just for debug */
932 inta_mask = iwl_read32(priv, CSR_INT_MASK);
933 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
934 inta, inta_mask, inta_fh);
935 }
936 #endif
937
938 spin_unlock_irqrestore(&priv->lock, flags);
939
940 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
941 * atomic, make sure that inta covers all the interrupts that
942 * we've discovered, even if FH interrupt came in just after
943 * reading CSR_INT. */
944 if (inta_fh & CSR49_FH_INT_RX_MASK)
945 inta |= CSR_INT_BIT_FH_RX;
946 if (inta_fh & CSR49_FH_INT_TX_MASK)
947 inta |= CSR_INT_BIT_FH_TX;
948
949 /* Now service all interrupt bits discovered above. */
950 if (inta & CSR_INT_BIT_HW_ERR) {
951 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
952
953 /* Tell the device to stop sending interrupts */
954 iwl_legacy_disable_interrupts(priv);
955
956 priv->isr_stats.hw++;
957 iwl_legacy_irq_handle_error(priv);
958
959 handled |= CSR_INT_BIT_HW_ERR;
960
961 return;
962 }
963
964 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
965 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
966 /* NIC fires this, but we don't use it, redundant with WAKEUP */
967 if (inta & CSR_INT_BIT_SCD) {
968 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
969 "the frame/frames.\n");
970 priv->isr_stats.sch++;
971 }
972
973 /* Alive notification via Rx interrupt will do the real work */
974 if (inta & CSR_INT_BIT_ALIVE) {
975 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
976 priv->isr_stats.alive++;
977 }
978 }
979 #endif
980 /* Safely ignore these bits for debug checks below */
981 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
982
983 /* HW RF KILL switch toggled */
984 if (inta & CSR_INT_BIT_RF_KILL) {
985 int hw_rf_kill = 0;
986 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
987 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
988 hw_rf_kill = 1;
989
990 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
991 hw_rf_kill ? "disable radio" : "enable radio");
992
993 priv->isr_stats.rfkill++;
994
995 /* The driver only loads ucode when setting the interface up,
996 * and it allows loading the ucode even if the radio
997 * is killed. Hence, update the killswitch state here. The
998 * rfkill handler will take care of restarting if needed.
999 */
1000 if (!test_bit(STATUS_ALIVE, &priv->status)) {
1001 if (hw_rf_kill)
1002 set_bit(STATUS_RF_KILL_HW, &priv->status);
1003 else
1004 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1005 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
1006 }
1007
1008 handled |= CSR_INT_BIT_RF_KILL;
1009 }
1010
1011 /* Chip got too hot and stopped itself */
1012 if (inta & CSR_INT_BIT_CT_KILL) {
1013 IWL_ERR(priv, "Microcode CT kill error detected.\n");
1014 priv->isr_stats.ctkill++;
1015 handled |= CSR_INT_BIT_CT_KILL;
1016 }
1017
1018 /* Error detected by uCode */
1019 if (inta & CSR_INT_BIT_SW_ERR) {
1020 IWL_ERR(priv, "Microcode SW error detected. "
1021 " Restarting 0x%X.\n", inta);
1022 priv->isr_stats.sw++;
1023 iwl_legacy_irq_handle_error(priv);
1024 handled |= CSR_INT_BIT_SW_ERR;
1025 }
1026
1027 /*
1028 * uCode wakes up after power-down sleep.
1029 * Tell device about any new tx or host commands enqueued,
1030 * and about any Rx buffers made available while asleep.
1031 */
1032 if (inta & CSR_INT_BIT_WAKEUP) {
1033 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1034 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1035 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1036 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
1037 priv->isr_stats.wakeup++;
1038 handled |= CSR_INT_BIT_WAKEUP;
1039 }
1040
1041 /* All uCode command responses, including Tx command responses,
1042 * Rx "responses" (frame-received notification), and other
1043 * notifications from uCode come through here*/
1044 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1045 iwl4965_rx_handle(priv);
1046 priv->isr_stats.rx++;
1047 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1048 }
1049
1050 /* This "Tx" DMA channel is used only for loading uCode */
1051 if (inta & CSR_INT_BIT_FH_TX) {
1052 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1053 priv->isr_stats.tx++;
1054 handled |= CSR_INT_BIT_FH_TX;
1055 /* Wake up uCode load routine, now that load is complete */
1056 priv->ucode_write_complete = 1;
1057 wake_up_interruptible(&priv->wait_command_queue);
1058 }
1059
1060 if (inta & ~handled) {
1061 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1062 priv->isr_stats.unhandled++;
1063 }
1064
1065 if (inta & ~(priv->inta_mask)) {
1066 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1067 inta & ~priv->inta_mask);
1068 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1069 }
1070
1071 /* Re-enable all interrupts */
1072 /* only re-enable if disabled by irq */
1073 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1074 iwl_legacy_enable_interrupts(priv);
1075
1076 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1077 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1078 inta = iwl_read32(priv, CSR_INT);
1079 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1080 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1081 IWL_DEBUG_ISR(priv,
1082 "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1083 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1084 }
1085 #endif
1086 }
1087
1088 /*****************************************************************************
1089 *
1090 * sysfs attributes
1091 *
1092 *****************************************************************************/
1093
1094 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1095
1096 /*
1097 * The following adds a new attribute to the sysfs representation
1098 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
1099 * used for controlling the debug level.
1100 *
1101 * See the level definitions in iwl for details.
1102 *
1103 * The debug_level managed through sysfs below is a per-device debug
1104 * level that is used instead of the global debug level whenever it is
1105 * set.
1106 */
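/*
 * Example usage (path per the comment above; the mask value shown is
 * arbitrary):
 *
 *   cat /sys/class/net/wlan0/device/debug_level
 *   echo 0x47 > /sys/class/net/wlan0/device/debug_level
 *
 * strict_strtoul() is called with base 0 below, so both hex ("0x...")
 * and decimal input are accepted.
 */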
1107 static ssize_t iwl4965_show_debug_level(struct device *d,
1108 struct device_attribute *attr, char *buf)
1109 {
1110 struct iwl_priv *priv = dev_get_drvdata(d);
1111 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
1112 }
1113 static ssize_t iwl4965_store_debug_level(struct device *d,
1114 struct device_attribute *attr,
1115 const char *buf, size_t count)
1116 {
1117 struct iwl_priv *priv = dev_get_drvdata(d);
1118 unsigned long val;
1119 int ret;
1120
1121 ret = strict_strtoul(buf, 0, &val);
1122 if (ret)
1123 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
1124 else {
1125 priv->debug_level = val;
1126 if (iwl_legacy_alloc_traffic_mem(priv))
1127 IWL_ERR(priv,
1128 "Not enough memory to generate traffic log\n");
1129 }
1130 return strnlen(buf, count);
1131 }
1132
1133 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1134 iwl4965_show_debug_level, iwl4965_store_debug_level);
1135
1136
1137 #endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1138
1139
1140 static ssize_t iwl4965_show_temperature(struct device *d,
1141 struct device_attribute *attr, char *buf)
1142 {
1143 struct iwl_priv *priv = dev_get_drvdata(d);
1144
1145 if (!iwl_legacy_is_alive(priv))
1146 return -EAGAIN;
1147
1148 return sprintf(buf, "%d\n", priv->temperature);
1149 }
1150
1151 static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1152
1153 static ssize_t iwl4965_show_tx_power(struct device *d,
1154 struct device_attribute *attr, char *buf)
1155 {
1156 struct iwl_priv *priv = dev_get_drvdata(d);
1157
1158 if (!iwl_legacy_is_ready_rf(priv))
1159 return sprintf(buf, "off\n");
1160 else
1161 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1162 }
1163
1164 static ssize_t iwl4965_store_tx_power(struct device *d,
1165 struct device_attribute *attr,
1166 const char *buf, size_t count)
1167 {
1168 struct iwl_priv *priv = dev_get_drvdata(d);
1169 unsigned long val;
1170 int ret;
1171
1172 ret = strict_strtoul(buf, 10, &val);
1173 if (ret)
1174 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1175 else {
1176 ret = iwl_legacy_set_tx_power(priv, val, false);
1177 if (ret)
1178 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1179 ret);
1180 else
1181 ret = count;
1182 }
1183 return ret;
1184 }
1185
1186 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1187 iwl4965_show_tx_power, iwl4965_store_tx_power);
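/*
 * Example usage (interface path as above; the value must be plain decimal
 * and is handed to iwl_legacy_set_tx_power()):
 *
 *   cat /sys/class/net/wlan0/device/tx_power
 *   echo 14 > /sys/class/net/wlan0/device/tx_power
 */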
1188
1189 static struct attribute *iwl_sysfs_entries[] = {
1190 &dev_attr_temperature.attr,
1191 &dev_attr_tx_power.attr,
1192 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1193 &dev_attr_debug_level.attr,
1194 #endif
1195 NULL
1196 };
1197
1198 static struct attribute_group iwl_attribute_group = {
1199 .name = NULL, /* put in device directory */
1200 .attrs = iwl_sysfs_entries,
1201 };
1202
1203 /******************************************************************************
1204 *
1205 * uCode download functions
1206 *
1207 ******************************************************************************/
1208
1209 static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1210 {
1211 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1212 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1213 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1214 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1215 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1216 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1217 }
1218
1219 static void iwl4965_nic_start(struct iwl_priv *priv)
1220 {
1221 /* Remove all resets to allow NIC to operate */
1222 iwl_write32(priv, CSR_RESET, 0);
1223 }
1224
1225 static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1226 void *context);
1227 static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1228 u32 max_probe_length);
1229
1230 static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1231 {
1232 const char *name_pre = priv->cfg->fw_name_pre;
1233 char tag[8];
1234
1235 if (first) {
1236 priv->fw_index = priv->cfg->ucode_api_max;
1237 sprintf(tag, "%d", priv->fw_index);
1238 } else {
1239 priv->fw_index--;
1240 sprintf(tag, "%d", priv->fw_index);
1241 }
1242
1243 if (priv->fw_index < priv->cfg->ucode_api_min) {
1244 IWL_ERR(priv, "no suitable firmware found!\n");
1245 return -ENOENT;
1246 }
1247
1248 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1249
1250 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1251 priv->firmware_name);
1252
1253 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1254 &priv->pci_dev->dev, GFP_KERNEL, priv,
1255 iwl4965_ucode_callback);
1256 }
1257
1258 struct iwl4965_firmware_pieces {
1259 const void *inst, *data, *init, *init_data, *boot;
1260 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1261 };
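/*
 * On-disk layout handled by iwl4965_load_firmware() for the v1/v2 header
 * format: a 24-byte header (ucode version plus the five image sizes),
 * followed immediately by the images in this order: runtime instructions,
 * runtime data, init instructions, init data, bootstrap instructions.
 * The total file size must equal the header size plus the sum of the five
 * image sizes.
 */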
1262
1263 static int iwl4965_load_firmware(struct iwl_priv *priv,
1264 const struct firmware *ucode_raw,
1265 struct iwl4965_firmware_pieces *pieces)
1266 {
1267 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1268 u32 api_ver, hdr_size;
1269 const u8 *src;
1270
1271 priv->ucode_ver = le32_to_cpu(ucode->ver);
1272 api_ver = IWL_UCODE_API(priv->ucode_ver);
1273
1274 switch (api_ver) {
1275 default:
1276 case 0:
1277 case 1:
1278 case 2:
1279 hdr_size = 24;
1280 if (ucode_raw->size < hdr_size) {
1281 IWL_ERR(priv, "File size too small!\n");
1282 return -EINVAL;
1283 }
1284 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
1285 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
1286 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
1287 pieces->init_data_size =
1288 le32_to_cpu(ucode->v1.init_data_size);
1289 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
1290 src = ucode->v1.data;
1291 break;
1292 }
1293
1294 /* Verify size of file vs. image size info in file's header */
1295 if (ucode_raw->size != hdr_size + pieces->inst_size +
1296 pieces->data_size + pieces->init_size +
1297 pieces->init_data_size + pieces->boot_size) {
1298
1299 IWL_ERR(priv,
1300 "uCode file size %d does not match expected size\n",
1301 (int)ucode_raw->size);
1302 return -EINVAL;
1303 }
1304
1305 pieces->inst = src;
1306 src += pieces->inst_size;
1307 pieces->data = src;
1308 src += pieces->data_size;
1309 pieces->init = src;
1310 src += pieces->init_size;
1311 pieces->init_data = src;
1312 src += pieces->init_data_size;
1313 pieces->boot = src;
1314 src += pieces->boot_size;
1315
1316 return 0;
1317 }
1318
1319 /**
1320 * iwl4965_ucode_callback - callback when firmware was loaded
1321 *
1322 * If loaded successfully, copies the firmware into buffers
1323 * for the card to fetch (via DMA).
1324 */
1325 static void
1326 iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1327 {
1328 struct iwl_priv *priv = context;
1329 struct iwl_ucode_header *ucode;
1330 int err;
1331 struct iwl4965_firmware_pieces pieces;
1332 const unsigned int api_max = priv->cfg->ucode_api_max;
1333 const unsigned int api_min = priv->cfg->ucode_api_min;
1334 u32 api_ver;
1335
1336 u32 max_probe_length = 200;
1337 u32 standard_phy_calibration_size =
1338 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1339
1340 memset(&pieces, 0, sizeof(pieces));
1341
1342 if (!ucode_raw) {
1343 if (priv->fw_index <= priv->cfg->ucode_api_max)
1344 IWL_ERR(priv,
1345 "request for firmware file '%s' failed.\n",
1346 priv->firmware_name);
1347 goto try_again;
1348 }
1349
1350 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1351 priv->firmware_name, ucode_raw->size);
1352
1353 /* Make sure that we got at least the API version number */
1354 if (ucode_raw->size < 4) {
1355 IWL_ERR(priv, "File size way too small!\n");
1356 goto try_again;
1357 }
1358
1359 /* Data from ucode file: header followed by uCode images */
1360 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1361
1362 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1363
1364 if (err)
1365 goto try_again;
1366
1367 api_ver = IWL_UCODE_API(priv->ucode_ver);
1368
1369 /*
1370 * api_ver should match the api version forming part of the
1371 * firmware filename ... but we don't check for that and only rely
1372 * on the API version read from the firmware header from here on forward.
1373 */
1374 if (api_ver < api_min || api_ver > api_max) {
1375 IWL_ERR(priv,
1376 "Driver unable to support your firmware API. "
1377 "Driver supports v%u, firmware is v%u.\n",
1378 api_max, api_ver);
1379 goto try_again;
1380 }
1381
1382 if (api_ver != api_max)
1383 IWL_ERR(priv,
1384 "Firmware has old API version. Expected v%u, "
1385 "got v%u. New firmware can be obtained "
1386 "from http://www.intellinuxwireless.org.\n",
1387 api_max, api_ver);
1388
1389 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1390 IWL_UCODE_MAJOR(priv->ucode_ver),
1391 IWL_UCODE_MINOR(priv->ucode_ver),
1392 IWL_UCODE_API(priv->ucode_ver),
1393 IWL_UCODE_SERIAL(priv->ucode_ver));
1394
1395 snprintf(priv->hw->wiphy->fw_version,
1396 sizeof(priv->hw->wiphy->fw_version),
1397 "%u.%u.%u.%u",
1398 IWL_UCODE_MAJOR(priv->ucode_ver),
1399 IWL_UCODE_MINOR(priv->ucode_ver),
1400 IWL_UCODE_API(priv->ucode_ver),
1401 IWL_UCODE_SERIAL(priv->ucode_ver));
1402
1403 /*
1404 * For any of the failures below (before allocating pci memory)
1405 * we will try to load a version with a smaller API -- maybe the
1406 * user just got a corrupted version of the latest API.
1407 */
1408
1409 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1410 priv->ucode_ver);
1411 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1412 pieces.inst_size);
1413 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1414 pieces.data_size);
1415 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1416 pieces.init_size);
1417 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1418 pieces.init_data_size);
1419 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1420 pieces.boot_size);
1421
1422 /* Verify that uCode images will fit in card's SRAM */
1423 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1424 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1425 pieces.inst_size);
1426 goto try_again;
1427 }
1428
1429 if (pieces.data_size > priv->hw_params.max_data_size) {
1430 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1431 pieces.data_size);
1432 goto try_again;
1433 }
1434
1435 if (pieces.init_size > priv->hw_params.max_inst_size) {
1436 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1437 pieces.init_size);
1438 goto try_again;
1439 }
1440
1441 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1442 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1443 pieces.init_data_size);
1444 goto try_again;
1445 }
1446
1447 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1448 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1449 pieces.boot_size);
1450 goto try_again;
1451 }
1452
1453 /* Allocate ucode buffers for card's bus-master loading ... */
1454
1455 /* Runtime instructions and 2 copies of data:
1456 * 1) unmodified from disk
1457 * 2) backup cache for save/restore during power-downs */
1458 priv->ucode_code.len = pieces.inst_size;
1459 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1460
1461 priv->ucode_data.len = pieces.data_size;
1462 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1463
1464 priv->ucode_data_backup.len = pieces.data_size;
1465 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1466
1467 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1468 !priv->ucode_data_backup.v_addr)
1469 goto err_pci_alloc;
1470
1471 /* Initialization instructions and data */
1472 if (pieces.init_size && pieces.init_data_size) {
1473 priv->ucode_init.len = pieces.init_size;
1474 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1475
1476 priv->ucode_init_data.len = pieces.init_data_size;
1477 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1478
1479 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1480 goto err_pci_alloc;
1481 }
1482
1483 /* Bootstrap (instructions only, no data) */
1484 if (pieces.boot_size) {
1485 priv->ucode_boot.len = pieces.boot_size;
1486 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1487
1488 if (!priv->ucode_boot.v_addr)
1489 goto err_pci_alloc;
1490 }
1491
1492 /* Now that we can no longer fail, copy information */
1493
1494 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1495
1496 /* Copy images into buffers for card's bus-master reads ... */
1497
1498 /* Runtime instructions (first block of data in file) */
1499 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1500 pieces.inst_size);
1501 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1502
1503 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1504 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1505
1506 /*
1507 * Runtime data
1508 * NOTE: Copy into backup buffer will be done in iwl_up()
1509 */
1510 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1511 pieces.data_size);
1512 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1513 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1514
1515 /* Initialization instructions */
1516 if (pieces.init_size) {
1517 IWL_DEBUG_INFO(priv,
1518 "Copying (but not loading) init instr len %Zd\n",
1519 pieces.init_size);
1520 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1521 }
1522
1523 /* Initialization data */
1524 if (pieces.init_data_size) {
1525 IWL_DEBUG_INFO(priv,
1526 "Copying (but not loading) init data len %Zd\n",
1527 pieces.init_data_size);
1528 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1529 pieces.init_data_size);
1530 }
1531
1532 /* Bootstrap instructions */
1533 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1534 pieces.boot_size);
1535 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1536
1537 /*
1538 * figure out the offset of the chain noise reset and gain commands
1539 * based on the size of the standard phy calibration commands table
1540 */
1541 priv->_4965.phy_calib_chain_noise_reset_cmd =
1542 standard_phy_calibration_size;
1543 priv->_4965.phy_calib_chain_noise_gain_cmd =
1544 standard_phy_calibration_size + 1;
1545
1546 /**************************************************
1547 * This is still part of probe() in a sense...
1548 *
1549 * 9. Setup and register with mac80211 and debugfs
1550 **************************************************/
1551 err = iwl4965_mac_setup_register(priv, max_probe_length);
1552 if (err)
1553 goto out_unbind;
1554
1555 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1556 if (err)
1557 IWL_ERR(priv,
1558 "failed to create debugfs files. Ignoring error: %d\n", err);
1559
1560 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1561 &iwl_attribute_group);
1562 if (err) {
1563 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1564 goto out_unbind;
1565 }
1566
1567 /* We have our copies now, allow the OS to release its copies */
1568 release_firmware(ucode_raw);
1569 complete(&priv->_4965.firmware_loading_complete);
1570 return;
1571
1572 try_again:
1573 /* try next, if any */
1574 if (iwl4965_request_firmware(priv, false))
1575 goto out_unbind;
1576 release_firmware(ucode_raw);
1577 return;
1578
1579 err_pci_alloc:
1580 IWL_ERR(priv, "failed to allocate pci memory\n");
1581 iwl4965_dealloc_ucode_pci(priv);
1582 out_unbind:
1583 complete(&priv->_4965.firmware_loading_complete);
1584 device_release_driver(&priv->pci_dev->dev);
1585 release_firmware(ucode_raw);
1586 }
1587
1588 static const char * const desc_lookup_text[] = {
1589 "OK",
1590 "FAIL",
1591 "BAD_PARAM",
1592 "BAD_CHECKSUM",
1593 "NMI_INTERRUPT_WDG",
1594 "SYSASSERT",
1595 "FATAL_ERROR",
1596 "BAD_COMMAND",
1597 "HW_ERROR_TUNE_LOCK",
1598 "HW_ERROR_TEMPERATURE",
1599 "ILLEGAL_CHAN_FREQ",
1600 "VCC_NOT_STABLE",
1601 "FH_ERROR",
1602 "NMI_INTERRUPT_HOST",
1603 "NMI_INTERRUPT_ACTION_PT",
1604 "NMI_INTERRUPT_UNKNOWN",
1605 "UCODE_VERSION_MISMATCH",
1606 "HW_ERROR_ABS_LOCK",
1607 "HW_ERROR_CAL_LOCK_FAIL",
1608 "NMI_INTERRUPT_INST_ACTION_PT",
1609 "NMI_INTERRUPT_DATA_ACTION_PT",
1610 "NMI_TRM_HW_ER",
1611 "NMI_INTERRUPT_TRM",
1612 "NMI_INTERRUPT_BREAK_POINT"
1613 "DEBUG_0",
1614 "DEBUG_1",
1615 "DEBUG_2",
1616 "DEBUG_3",
1617 };
1618
1619 static struct { char *name; u8 num; } advanced_lookup[] = {
1620 { "NMI_INTERRUPT_WDG", 0x34 },
1621 { "SYSASSERT", 0x35 },
1622 { "UCODE_VERSION_MISMATCH", 0x37 },
1623 { "BAD_COMMAND", 0x38 },
1624 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1625 { "FATAL_ERROR", 0x3D },
1626 { "NMI_TRM_HW_ERR", 0x46 },
1627 { "NMI_INTERRUPT_TRM", 0x4C },
1628 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1629 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1630 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1631 { "NMI_INTERRUPT_HOST", 0x66 },
1632 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1633 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1634 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1635 { "ADVANCED_SYSASSERT", 0 },
1636 };
1637
1638 static const char *iwl4965_desc_lookup(u32 num)
1639 {
1640 int i;
1641 int max = ARRAY_SIZE(desc_lookup_text);
1642
1643 if (num < max)
1644 return desc_lookup_text[num];
1645
1646 max = ARRAY_SIZE(advanced_lookup) - 1;
1647 for (i = 0; i < max; i++) {
1648 if (advanced_lookup[i].num == num)
1649 break;
1650 }
1651 return advanced_lookup[i].name;
1652 }
1653
1654 #define ERROR_START_OFFSET (1 * sizeof(u32))
1655 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
1656
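/*
 * Word offsets within the error event table, as read by
 * iwl4965_dump_nic_error_log() below: 0 = count, 1 = error id (desc),
 * 2 = pc, 3 = blink1, 4 = blink2, 5 = ilink1, 6 = ilink2, 7 = data1,
 * 8 = data2, 9 = line, 11 = timestamp, 22 = last host command id.
 */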
1657 void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
1658 {
1659 u32 data2, line;
1660 u32 desc, time, count, base, data1;
1661 u32 blink1, blink2, ilink1, ilink2;
1662 u32 pc, hcmd;
1663
1664 if (priv->ucode_type == UCODE_INIT) {
1665 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1666 } else {
1667 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1668 }
1669
1670 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1671 IWL_ERR(priv,
1672 "Not valid error log pointer 0x%08X for %s uCode\n",
1673 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1674 return;
1675 }
1676
1677 count = iwl_legacy_read_targ_mem(priv, base);
1678
1679 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1680 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1681 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1682 priv->status, count);
1683 }
1684
1685 desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
1686 priv->isr_stats.err_code = desc;
1687 pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
1688 blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
1689 blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
1690 ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
1691 ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
1692 data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
1693 data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
1694 line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
1695 time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
1696 hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
1697
1698 trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
1699 time, data1, data2, line,
1700 blink1, blink2, ilink1, ilink2);
1701
1702 IWL_ERR(priv, "Desc Time "
1703 "data1 data2 line\n");
1704 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
1705 iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
1706 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1707 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1708 pc, blink1, blink2, ilink1, ilink2, hcmd);
1709 }
1710
1711 #define EVENT_START_OFFSET (4 * sizeof(u32))
1712
1713 /**
1714  * iwl4965_print_event_log - Dump the uCode event log to syslog or a caller-supplied buffer
1715 *
1716 */
1717 static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
1718 u32 num_events, u32 mode,
1719 int pos, char **buf, size_t bufsz)
1720 {
1721 u32 i;
1722 u32 base; /* SRAM byte address of event log header */
1723 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1724 u32 ptr; /* SRAM byte address of log data */
1725 u32 ev, time, data; /* event log data */
1726 unsigned long reg_flags;
1727
1728 if (num_events == 0)
1729 return pos;
1730
1731 if (priv->ucode_type == UCODE_INIT) {
1732 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1733 } else {
1734 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1735 }
1736
1737 if (mode == 0)
1738 event_size = 2 * sizeof(u32);
1739 else
1740 event_size = 3 * sizeof(u32);
1741
1742 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1743
1744 /* Make sure device is powered up for SRAM reads */
1745 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1746 iwl_grab_nic_access(priv);
1747
1748 /* Set starting address; reads will auto-increment */
1749 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1750 rmb();
1751
1752 /* "time" is actually "data" for mode 0 (no timestamp).
1753 * place event id # at far right for easier visual parsing. */
1754 for (i = 0; i < num_events; i++) {
1755 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1756 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1757 if (mode == 0) {
1758 /* data, ev */
1759 if (bufsz) {
1760 pos += scnprintf(*buf + pos, bufsz - pos,
1761 "EVT_LOG:0x%08x:%04u\n",
1762 time, ev);
1763 } else {
1764 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1765 time, ev);
1766 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1767 time, ev);
1768 }
1769 } else {
1770 data = _iwl_legacy_read_direct32(priv,
1771 HBUS_TARG_MEM_RDAT);
1772 if (bufsz) {
1773 pos += scnprintf(*buf + pos, bufsz - pos,
1774 "EVT_LOGT:%010u:0x%08x:%04u\n",
1775 time, data, ev);
1776 } else {
1777 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1778 time, data, ev);
1779 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1780 data, ev);
1781 }
1782 }
1783 }
1784
1785 /* Allow device to power down */
1786 iwl_release_nic_access(priv);
1787 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1788 return pos;
1789 }
1790
1791 /**
1792  * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
1793 */
1794 static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795 u32 num_wraps, u32 next_entry,
1796 u32 size, u32 mode,
1797 int pos, char **buf, size_t bufsz)
1798 {
1799 /*
1800 	 * display the newest DEFAULT_LOG_ENTRIES entries,
1801 	 * i.e. the entries just before the next one that uCode would fill.
1802 */
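	/* If the log wrapped and fewer than 'size' entries precede the write
	 * pointer, the newest entries straddle the end of the circular buffer,
	 * so they are printed in two pieces. */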
1803 if (num_wraps) {
1804 if (next_entry < size) {
1805 pos = iwl4965_print_event_log(priv,
1806 capacity - (size - next_entry),
1807 size - next_entry, mode,
1808 pos, buf, bufsz);
1809 pos = iwl4965_print_event_log(priv, 0,
1810 next_entry, mode,
1811 pos, buf, bufsz);
1812 } else
1813 pos = iwl4965_print_event_log(priv, next_entry - size,
1814 size, mode, pos, buf, bufsz);
1815 } else {
1816 if (next_entry < size) {
1817 pos = iwl4965_print_event_log(priv, 0, next_entry,
1818 mode, pos, buf, bufsz);
1819 } else {
1820 pos = iwl4965_print_event_log(priv, next_entry - size,
1821 size, mode, pos, buf, bufsz);
1822 }
1823 }
1824 return pos;
1825 }
1826
1827 #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1828
1829 int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1830 char **buf, bool display)
1831 {
1832 u32 base; /* SRAM byte address of event log header */
1833 u32 capacity; /* event log capacity in # entries */
1834 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1835 u32 num_wraps; /* # times uCode wrapped to top of log */
1836 u32 next_entry; /* index of next entry to be written by uCode */
1837 u32 size; /* # entries that we'll print */
1838 int pos = 0;
1839 size_t bufsz = 0;
1840
1841 if (priv->ucode_type == UCODE_INIT) {
1842 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1843 } else {
1844 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1845 }
1846
1847 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1848 IWL_ERR(priv,
1849 "Invalid event log pointer 0x%08X for %s uCode\n",
1850 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1851 return -EINVAL;
1852 }
1853
1854 /* event log header */
1855 capacity = iwl_legacy_read_targ_mem(priv, base);
1856 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1857 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1858 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1859
1860 size = num_wraps ? capacity : next_entry;
1861
1862 /* bail out if nothing in log */
1863 if (size == 0) {
1864 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1865 return pos;
1866 }
1867
1868 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1869 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1870 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1871 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1872 #else
1873 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1874 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1875 #endif
1876 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1877 size);
1878
1879 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1880 if (display) {
1881 if (full_log)
1882 bufsz = capacity * 48;
1883 else
1884 bufsz = size * 48;
1885 *buf = kmalloc(bufsz, GFP_KERNEL);
1886 if (!*buf)
1887 return -ENOMEM;
1888 }
1889 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1890 /*
1891 * if uCode has wrapped back to top of log,
1892 * start at the oldest entry,
1893 			 * i.e. the next one that uCode would fill.
1894 */
1895 if (num_wraps)
1896 pos = iwl4965_print_event_log(priv, next_entry,
1897 capacity - next_entry, mode,
1898 pos, buf, bufsz);
1899 /* (then/else) start at top of log */
1900 pos = iwl4965_print_event_log(priv, 0,
1901 next_entry, mode, pos, buf, bufsz);
1902 } else
1903 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1904 next_entry, size, mode,
1905 pos, buf, bufsz);
1906 #else
1907 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1908 next_entry, size, mode,
1909 pos, buf, bufsz);
1910 #endif
1911 return pos;
1912 }
1913
1914 static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1915 {
1916 struct iwl_ct_kill_config cmd;
1917 unsigned long flags;
1918 int ret = 0;
1919
1920 spin_lock_irqsave(&priv->lock, flags);
1921 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1922 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1923 spin_unlock_irqrestore(&priv->lock, flags);
1924
1925 cmd.critical_temperature_R =
1926 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1927
1928 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1929 sizeof(cmd), &cmd);
1930 if (ret)
1931 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1932 else
1933 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1934 "succeeded, "
1935 "critical temperature is %d\n",
1936 priv->hw_params.ct_kill_threshold);
1937 }
1938
1939 static const s8 default_queue_to_tx_fifo[] = {
1940 IWL_TX_FIFO_VO,
1941 IWL_TX_FIFO_VI,
1942 IWL_TX_FIFO_BE,
1943 IWL_TX_FIFO_BK,
1944 IWL49_CMD_FIFO_NUM,
1945 IWL_TX_FIFO_UNUSED,
1946 IWL_TX_FIFO_UNUSED,
1947 };
1948
1949 static int iwl4965_alive_notify(struct iwl_priv *priv)
1950 {
1951 u32 a;
1952 unsigned long flags;
1953 int i, chan;
1954 u32 reg_val;
1955
1956 spin_lock_irqsave(&priv->lock, flags);
1957
1958 	/* Clear 4965's internal Tx Scheduler database */
1959 priv->scd_base_addr = iwl_legacy_read_prph(priv,
1960 IWL49_SCD_SRAM_BASE_ADDR);
1961 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1962 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1963 iwl_legacy_write_targ_mem(priv, a, 0);
1964 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1965 iwl_legacy_write_targ_mem(priv, a, 0);
1966 for (; a < priv->scd_base_addr +
1967 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
1968 iwl_legacy_write_targ_mem(priv, a, 0);
1969
1970 	/* Tell 4965 where to find Tx byte count tables */
1971 iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1972 priv->scd_bc_tbls.dma >> 10);
1973
1974 /* Enable DMA channel */
1975 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
1976 iwl_legacy_write_direct32(priv,
1977 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1978 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1979 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1980
1981 /* Update FH chicken bits */
1982 reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
1983 iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
1984 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1985
1986 /* Disable chain mode for all queues */
1987 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1988
1989 /* Initialize each Tx queue (including the command queue) */
1990 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1991
1992 /* TFD circular buffer read/write indexes */
1993 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1994 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1995
1996 /* Max Tx Window size for Scheduler-ACK mode */
1997 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
1998 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1999 (SCD_WIN_SIZE <<
2000 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
2001 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2002
2003 /* Frame limit */
2004 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
2005 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
2006 sizeof(u32),
2007 (SCD_FRAME_LIMIT <<
2008 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2009 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2010
2011 }
2012 iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
2013 (1 << priv->hw_params.max_txq_num) - 1);
2014
2015 /* Activate all Tx DMA/FIFO channels */
2016 iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
2017
2018 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
2019
2020 	/* make sure all queues are not stopped */
2021 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
2022 for (i = 0; i < 4; i++)
2023 atomic_set(&priv->queue_stop_count[i], 0);
2024
2025 	/* reset to 0 to enable all the queues first */
2026 priv->txq_ctx_active_msk = 0;
2027 /* Map each Tx/cmd queue to its corresponding fifo */
2028 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
2029
2030 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2031 int ac = default_queue_to_tx_fifo[i];
2032
2033 iwl_txq_ctx_activate(priv, i);
2034
2035 if (ac == IWL_TX_FIFO_UNUSED)
2036 continue;
2037
2038 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2039 }
2040
2041 spin_unlock_irqrestore(&priv->lock, flags);
2042
2043 return 0;
2044 }
2045
2046 /**
2047 * iwl4965_alive_start - called after REPLY_ALIVE notification received
2048 * from protocol/runtime uCode (initialization uCode's
2049 * Alive gets handled by iwl_init_alive_start()).
2050 */
2051 static void iwl4965_alive_start(struct iwl_priv *priv)
2052 {
2053 int ret = 0;
2054 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2055
2056 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2057
2058 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2059 /* We had an error bringing up the hardware, so take it
2060 * all the way back down so we can try again */
2061 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2062 goto restart;
2063 }
2064
2065 	/* The initialize uCode has loaded the runtime uCode ... verify inst image.
2066 * This is a paranoid check, because we would not have gotten the
2067 * "runtime" alive if code weren't properly loaded. */
2068 if (iwl4965_verify_ucode(priv)) {
2069 /* Runtime instruction load was bad;
2070 * take it all the way back down so we can try again */
2071 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2072 goto restart;
2073 }
2074
2075 ret = iwl4965_alive_notify(priv);
2076 if (ret) {
2077 IWL_WARN(priv,
2078 "Could not complete ALIVE transition [ntf]: %d\n", ret);
2079 goto restart;
2080 }
2081
2082
2083 /* After the ALIVE response, we can send host commands to the uCode */
2084 set_bit(STATUS_ALIVE, &priv->status);
2085
2086 /* Enable watchdog to monitor the driver tx queues */
2087 iwl_legacy_setup_watchdog(priv);
2088
2089 if (iwl_legacy_is_rfkill(priv))
2090 return;
2091
2092 ieee80211_wake_queues(priv->hw);
2093
2094 priv->active_rate = IWL_RATES_MASK;
2095
2096 if (iwl_legacy_is_associated_ctx(ctx)) {
2097 struct iwl_legacy_rxon_cmd *active_rxon =
2098 (struct iwl_legacy_rxon_cmd *)&ctx->active;
2099 /* apply any changes in staging */
2100 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2101 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2102 } else {
2103 struct iwl_rxon_context *tmp;
2104 /* Initialize our rx_config data */
2105 for_each_context(priv, tmp)
2106 iwl_legacy_connection_init_rx_config(priv, tmp);
2107
2108 if (priv->cfg->ops->hcmd->set_rxon_chain)
2109 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2110 }
2111
2112 /* Configure bluetooth coexistence if enabled */
2113 iwl_legacy_send_bt_config(priv);
2114
2115 iwl4965_reset_run_time_calib(priv);
2116
2117 set_bit(STATUS_READY, &priv->status);
2118
2119 /* Configure the adapter for unassociated operation */
2120 iwl_legacy_commit_rxon(priv, ctx);
2121
2122 /* At this point, the NIC is initialized and operational */
2123 iwl4965_rf_kill_ct_config(priv);
2124
2125 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2126 wake_up_interruptible(&priv->wait_command_queue);
2127
2128 iwl_legacy_power_update_mode(priv, true);
2129 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2130
2131 return;
2132
2133 restart:
2134 queue_work(priv->workqueue, &priv->restart);
2135 }
2136
2137 static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2138
2139 static void __iwl4965_down(struct iwl_priv *priv)
2140 {
2141 unsigned long flags;
2142 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
2143
2144 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2145
2146 iwl_legacy_scan_cancel_timeout(priv, 200);
2147
2148 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2149
2150 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2151 	 * to prevent the timer from being rearmed */
2152 del_timer_sync(&priv->watchdog);
2153
2154 iwl_legacy_clear_ucode_stations(priv, NULL);
2155 iwl_legacy_dealloc_bcast_stations(priv);
2156 iwl_legacy_clear_driver_stations(priv);
2157
2158 /* Unblock any waiting calls */
2159 wake_up_interruptible_all(&priv->wait_command_queue);
2160
2161 /* Wipe out the EXIT_PENDING status bit if we are not actually
2162 * exiting the module */
2163 if (!exit_pending)
2164 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2165
2166 /* stop and reset the on-board processor */
2167 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2168
2169 /* tell the device to stop sending interrupts */
2170 spin_lock_irqsave(&priv->lock, flags);
2171 iwl_legacy_disable_interrupts(priv);
2172 spin_unlock_irqrestore(&priv->lock, flags);
2173 iwl4965_synchronize_irq(priv);
2174
2175 if (priv->mac80211_registered)
2176 ieee80211_stop_queues(priv->hw);
2177
2178 /* If we have not previously called iwl_init() then
2179 * clear all bits but the RF Kill bit and return */
2180 if (!iwl_legacy_is_init(priv)) {
2181 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2182 STATUS_RF_KILL_HW |
2183 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2184 STATUS_GEO_CONFIGURED |
2185 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2186 STATUS_EXIT_PENDING;
2187 goto exit;
2188 }
2189
2190 /* ...otherwise clear out all the status bits but the RF Kill
2191 * bit and continue taking the NIC down. */
2192 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2193 STATUS_RF_KILL_HW |
2194 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2195 STATUS_GEO_CONFIGURED |
2196 test_bit(STATUS_FW_ERROR, &priv->status) <<
2197 STATUS_FW_ERROR |
2198 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2199 STATUS_EXIT_PENDING;
2200
2201 iwl4965_txq_ctx_stop(priv);
2202 iwl4965_rxq_stop(priv);
2203
2204 /* Power-down device's busmaster DMA clocks */
2205 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2206 udelay(5);
2207
2208 /* Make sure (redundant) we've released our request to stay awake */
2209 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
2210 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2211
2212 /* Stop the device, and put it in low power state */
2213 iwl_legacy_apm_stop(priv);
2214
2215 exit:
2216 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2217
2218 dev_kfree_skb(priv->beacon_skb);
2219 priv->beacon_skb = NULL;
2220
2221 /* clear out any free frames */
2222 iwl4965_clear_free_frames(priv);
2223 }
2224
2225 static void iwl4965_down(struct iwl_priv *priv)
2226 {
2227 mutex_lock(&priv->mutex);
2228 __iwl4965_down(priv);
2229 mutex_unlock(&priv->mutex);
2230
2231 iwl4965_cancel_deferred_work(priv);
2232 }
2233
2234 #define HW_READY_TIMEOUT (50)
2235
2236 static int iwl4965_set_hw_ready(struct iwl_priv *priv)
2237 {
2238 int ret = 0;
2239
2240 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2242
2243 /* See if we got it */
2244 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2245 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2246 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2247 HW_READY_TIMEOUT);
2248 if (ret != -ETIMEDOUT)
2249 priv->hw_ready = true;
2250 else
2251 priv->hw_ready = false;
2252
2253 IWL_DEBUG_INFO(priv, "hardware %s\n",
2254 (priv->hw_ready == 1) ? "ready" : "not ready");
2255 return ret;
2256 }
2257
2258 static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
2259 {
2260 int ret = 0;
2261
2262 IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
2263
2264 ret = iwl4965_set_hw_ready(priv);
2265 if (priv->hw_ready)
2266 return ret;
2267
2268 /* If HW is not ready, prepare the conditions to check again */
2269 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2270 CSR_HW_IF_CONFIG_REG_PREPARE);
2271
2272 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2273 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2274 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2275
2276 /* HW should be ready by now, check again. */
2277 if (ret != -ETIMEDOUT)
2278 iwl4965_set_hw_ready(priv);
2279
2280 return ret;
2281 }
2282
2283 #define MAX_HW_RESTARTS 5
2284
2285 static int __iwl4965_up(struct iwl_priv *priv)
2286 {
2287 struct iwl_rxon_context *ctx;
2288 int i;
2289 int ret;
2290
2291 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2292 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2293 return -EIO;
2294 }
2295
2296 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2297 IWL_ERR(priv, "ucode not available for device bringup\n");
2298 return -EIO;
2299 }
2300
2301 for_each_context(priv, ctx) {
2302 ret = iwl4965_alloc_bcast_station(priv, ctx);
2303 if (ret) {
2304 iwl_legacy_dealloc_bcast_stations(priv);
2305 return ret;
2306 }
2307 }
2308
2309 iwl4965_prepare_card_hw(priv);
2310
2311 if (!priv->hw_ready) {
2312 IWL_WARN(priv, "Exit HW not ready\n");
2313 return -EIO;
2314 }
2315
2316 /* If platform's RF_KILL switch is NOT set to KILL */
2317 if (iwl_read32(priv,
2318 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2319 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2320 else
2321 set_bit(STATUS_RF_KILL_HW, &priv->status);
2322
2323 if (iwl_legacy_is_rfkill(priv)) {
2324 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2325
2326 iwl_legacy_enable_interrupts(priv);
2327 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2328 return 0;
2329 }
2330
2331 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2332
2333 /* must be initialised before iwl_hw_nic_init */
2334 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
2335
2336 ret = iwl4965_hw_nic_init(priv);
2337 if (ret) {
2338 IWL_ERR(priv, "Unable to init nic\n");
2339 return ret;
2340 }
2341
2342 /* make sure rfkill handshake bits are cleared */
2343 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2344 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2345 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2346
2347 /* clear (again), then enable host interrupts */
2348 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2349 iwl_legacy_enable_interrupts(priv);
2350
2351 /* really make sure rfkill handshake bits are cleared */
2352 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2353 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2354
2355 /* Copy original ucode data image from disk into backup cache.
2356 * This will be used to initialize the on-board processor's
2357 * data SRAM for a clean start when the runtime program first loads. */
2358 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2359 priv->ucode_data.len);
2360
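	/* Load the bootstrap uCode and start the card, retrying up to
	 * MAX_HW_RESTARTS times before giving up. */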
2361 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2362
2363 /* load bootstrap state machine,
2364 * load bootstrap program into processor's memory,
2365 * prepare to load the "initialize" uCode */
2366 ret = priv->cfg->ops->lib->load_ucode(priv);
2367
2368 if (ret) {
2369 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
2370 ret);
2371 continue;
2372 }
2373
2374 /* start card; "initialize" will load runtime ucode */
2375 iwl4965_nic_start(priv);
2376
2377 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2378
2379 return 0;
2380 }
2381
2382 set_bit(STATUS_EXIT_PENDING, &priv->status);
2383 __iwl4965_down(priv);
2384 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2385
2386 	/* tried to restart and configure the device for as long as our
2387 * patience could withstand */
2388 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2389 return -EIO;
2390 }
2391
2392
2393 /*****************************************************************************
2394 *
2395 * Workqueue callbacks
2396 *
2397 *****************************************************************************/
2398
2399 static void iwl4965_bg_init_alive_start(struct work_struct *data)
2400 {
2401 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work);
2403
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return;
2406
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv);
2409 mutex_unlock(&priv->mutex);
2410 }
2411
2412 static void iwl4965_bg_alive_start(struct work_struct *data)
2413 {
2414 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work);
2416
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return;
2419
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv);
2422 mutex_unlock(&priv->mutex);
2423 }
2424
2425 static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2426 {
2427 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2428 run_time_calib_work);
2429
2430 mutex_lock(&priv->mutex);
2431
2432 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2433 test_bit(STATUS_SCANNING, &priv->status)) {
2434 mutex_unlock(&priv->mutex);
2435 return;
2436 }
2437
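	/* Run chain noise and sensitivity calibration against the most
	 * recent statistics notification once calibration has started. */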
2438 if (priv->start_calib) {
2439 iwl4965_chain_noise_calibration(priv,
2440 (void *)&priv->_4965.statistics);
2441 iwl4965_sensitivity_calibration(priv,
2442 (void *)&priv->_4965.statistics);
2443 }
2444
2445 mutex_unlock(&priv->mutex);
2446 }
2447
2448 static void iwl4965_bg_restart(struct work_struct *data)
2449 {
2450 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2451
2452 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2453 return;
2454
2455 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2456 struct iwl_rxon_context *ctx;
2457
2458 mutex_lock(&priv->mutex);
2459 for_each_context(priv, ctx)
2460 ctx->vif = NULL;
2461 priv->is_open = 0;
2462
2463 __iwl4965_down(priv);
2464
2465 mutex_unlock(&priv->mutex);
2466 iwl4965_cancel_deferred_work(priv);
2467 ieee80211_restart_hw(priv->hw);
2468 } else {
2469 iwl4965_down(priv);
2470
2471 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2472 return;
2473
2474 mutex_lock(&priv->mutex);
2475 __iwl4965_up(priv);
2476 mutex_unlock(&priv->mutex);
2477 }
2478 }
2479
2480 static void iwl4965_bg_rx_replenish(struct work_struct *data)
2481 {
2482 struct iwl_priv *priv =
2483 container_of(data, struct iwl_priv, rx_replenish);
2484
2485 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2486 return;
2487
2488 mutex_lock(&priv->mutex);
2489 iwl4965_rx_replenish(priv);
2490 mutex_unlock(&priv->mutex);
2491 }
2492
2493 /*****************************************************************************
2494 *
2495 * mac80211 entry point functions
2496 *
2497 *****************************************************************************/
2498
2499 #define UCODE_READY_TIMEOUT (4 * HZ)
2500
2501 /*
2502 * Not a mac80211 entry point function, but it fits in with all the
2503 * other mac80211 functions grouped here.
2504 */
2505 static int iwl4965_mac_setup_register(struct iwl_priv *priv,
2506 u32 max_probe_length)
2507 {
2508 int ret;
2509 struct ieee80211_hw *hw = priv->hw;
2510 struct iwl_rxon_context *ctx;
2511
2512 hw->rate_control_algorithm = "iwl-4965-rs";
2513
2514 /* Tell mac80211 our characteristics */
2515 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2516 IEEE80211_HW_AMPDU_AGGREGATION |
2517 IEEE80211_HW_NEED_DTIM_PERIOD |
2518 IEEE80211_HW_SPECTRUM_MGMT |
2519 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2520
2521 if (priv->cfg->sku & IWL_SKU_N)
2522 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2523 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2524
2525 hw->sta_data_size = sizeof(struct iwl_station_priv);
2526 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2527
2528 for_each_context(priv, ctx) {
2529 hw->wiphy->interface_modes |= ctx->interface_modes;
2530 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2531 }
2532
2533 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2534 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2535
2536 /*
2537 * For now, disable PS by default because it affects
2538 * RX performance significantly.
2539 */
2540 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2541
2542 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2543 /* we create the 802.11 header and a zero-length SSID element */
2544 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
2545
2546 /* Default value; 4 EDCA QOS priorities */
2547 hw->queues = 4;
2548
2549 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2550
2551 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2552 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2553 &priv->bands[IEEE80211_BAND_2GHZ];
2554 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2555 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2556 &priv->bands[IEEE80211_BAND_5GHZ];
2557
2558 iwl_legacy_leds_init(priv);
2559
2560 ret = ieee80211_register_hw(priv->hw);
2561 if (ret) {
2562 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2563 return ret;
2564 }
2565 priv->mac80211_registered = 1;
2566
2567 return 0;
2568 }
2569
2570
2571 int iwl4965_mac_start(struct ieee80211_hw *hw)
2572 {
2573 struct iwl_priv *priv = hw->priv;
2574 int ret;
2575
2576 IWL_DEBUG_MAC80211(priv, "enter\n");
2577
2578 /* we should be verifying the device is ready to be opened */
2579 mutex_lock(&priv->mutex);
2580 ret = __iwl4965_up(priv);
2581 mutex_unlock(&priv->mutex);
2582
2583 if (ret)
2584 return ret;
2585
2586 if (iwl_legacy_is_rfkill(priv))
2587 goto out;
2588
2589 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2590
2591 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2592 * mac80211 will not be run successfully. */
2593 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
2594 test_bit(STATUS_READY, &priv->status),
2595 UCODE_READY_TIMEOUT);
2596 if (!ret) {
2597 if (!test_bit(STATUS_READY, &priv->status)) {
2598 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2599 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2600 return -ETIMEDOUT;
2601 }
2602 }
2603
2604 iwl4965_led_enable(priv);
2605
2606 out:
2607 priv->is_open = 1;
2608 IWL_DEBUG_MAC80211(priv, "leave\n");
2609 return 0;
2610 }
2611
2612 void iwl4965_mac_stop(struct ieee80211_hw *hw)
2613 {
2614 struct iwl_priv *priv = hw->priv;
2615
2616 IWL_DEBUG_MAC80211(priv, "enter\n");
2617
2618 if (!priv->is_open)
2619 return;
2620
2621 priv->is_open = 0;
2622
2623 iwl4965_down(priv);
2624
2625 flush_workqueue(priv->workqueue);
2626
2627 /* enable interrupts again in order to receive rfkill changes */
2628 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2629 iwl_legacy_enable_interrupts(priv);
2630
2631 IWL_DEBUG_MAC80211(priv, "leave\n");
2632 }
2633
2634 void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2635 {
2636 struct iwl_priv *priv = hw->priv;
2637
2638 IWL_DEBUG_MACDUMP(priv, "enter\n");
2639
2640 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2641 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2642
2643 if (iwl4965_tx_skb(priv, skb))
2644 dev_kfree_skb_any(skb);
2645
2646 IWL_DEBUG_MACDUMP(priv, "leave\n");
2647 }
2648
2649 void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2650 struct ieee80211_vif *vif,
2651 struct ieee80211_key_conf *keyconf,
2652 struct ieee80211_sta *sta,
2653 u32 iv32, u16 *phase1key)
2654 {
2655 struct iwl_priv *priv = hw->priv;
2656 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2657
2658 IWL_DEBUG_MAC80211(priv, "enter\n");
2659
2660 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2661 iv32, phase1key);
2662
2663 IWL_DEBUG_MAC80211(priv, "leave\n");
2664 }
2665
2666 int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2667 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2668 struct ieee80211_key_conf *key)
2669 {
2670 struct iwl_priv *priv = hw->priv;
2671 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2672 struct iwl_rxon_context *ctx = vif_priv->ctx;
2673 int ret;
2674 u8 sta_id;
2675 bool is_default_wep_key = false;
2676
2677 IWL_DEBUG_MAC80211(priv, "enter\n");
2678
2679 if (priv->cfg->mod_params->sw_crypto) {
2680 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2681 return -EOPNOTSUPP;
2682 }
2683
2684 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2685 if (sta_id == IWL_INVALID_STATION)
2686 return -EINVAL;
2687
2688 mutex_lock(&priv->mutex);
2689 iwl_legacy_scan_cancel_timeout(priv, 100);
2690
2691 /*
2692 * If we are getting WEP group key and we didn't receive any key mapping
2693 * so far, we are in legacy wep mode (group key only), otherwise we are
2694 * in 1X mode.
2695 * In legacy wep mode, we use another host command to the uCode.
2696 */
2697 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2698 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2699 !sta) {
2700 if (cmd == SET_KEY)
2701 is_default_wep_key = !ctx->key_mapping_keys;
2702 else
2703 is_default_wep_key =
2704 (key->hw_key_idx == HW_KEY_DEFAULT);
2705 }
2706
2707 switch (cmd) {
2708 case SET_KEY:
2709 if (is_default_wep_key)
2710 ret = iwl4965_set_default_wep_key(priv,
2711 vif_priv->ctx, key);
2712 else
2713 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2714 key, sta_id);
2715
2716 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2717 break;
2718 case DISABLE_KEY:
2719 if (is_default_wep_key)
2720 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2721 else
2722 ret = iwl4965_remove_dynamic_key(priv, ctx,
2723 key, sta_id);
2724
2725 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2726 break;
2727 default:
2728 ret = -EINVAL;
2729 }
2730
2731 mutex_unlock(&priv->mutex);
2732 IWL_DEBUG_MAC80211(priv, "leave\n");
2733
2734 return ret;
2735 }
2736
2737 int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2738 struct ieee80211_vif *vif,
2739 enum ieee80211_ampdu_mlme_action action,
2740 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2741 u8 buf_size)
2742 {
2743 struct iwl_priv *priv = hw->priv;
2744 int ret = -EINVAL;
2745
2746 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2747 sta->addr, tid);
2748
2749 if (!(priv->cfg->sku & IWL_SKU_N))
2750 return -EACCES;
2751
2752 mutex_lock(&priv->mutex);
2753
2754 switch (action) {
2755 case IEEE80211_AMPDU_RX_START:
2756 IWL_DEBUG_HT(priv, "start Rx\n");
2757 ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
2758 break;
2759 case IEEE80211_AMPDU_RX_STOP:
2760 IWL_DEBUG_HT(priv, "stop Rx\n");
2761 ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
2762 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2763 ret = 0;
2764 break;
2765 case IEEE80211_AMPDU_TX_START:
2766 IWL_DEBUG_HT(priv, "start Tx\n");
2767 ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
2768 if (ret == 0) {
2769 priv->_4965.agg_tids_count++;
2770 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2771 priv->_4965.agg_tids_count);
2772 }
2773 break;
2774 case IEEE80211_AMPDU_TX_STOP:
2775 IWL_DEBUG_HT(priv, "stop Tx\n");
2776 ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
2777 if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
2778 priv->_4965.agg_tids_count--;
2779 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2780 priv->_4965.agg_tids_count);
2781 }
2782 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2783 ret = 0;
2784 break;
2785 case IEEE80211_AMPDU_TX_OPERATIONAL:
2786 ret = 0;
2787 break;
2788 }
2789 mutex_unlock(&priv->mutex);
2790
2791 return ret;
2792 }
2793
2794 int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
2795 struct ieee80211_vif *vif,
2796 struct ieee80211_sta *sta)
2797 {
2798 struct iwl_priv *priv = hw->priv;
2799 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2800 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2801 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2802 int ret;
2803 u8 sta_id;
2804
2805 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
2806 sta->addr);
2807 mutex_lock(&priv->mutex);
2808 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
2809 sta->addr);
2810 sta_priv->common.sta_id = IWL_INVALID_STATION;
2811
2812 atomic_set(&sta_priv->pending_frames, 0);
2813
2814 ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
2815 is_ap, sta, &sta_id);
2816 if (ret) {
2817 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
2818 sta->addr, ret);
2819 /* Should we return success if return code is EEXIST ? */
2820 mutex_unlock(&priv->mutex);
2821 return ret;
2822 }
2823
2824 sta_priv->common.sta_id = sta_id;
2825
2826 /* Initialize rate scaling */
2827 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
2828 sta->addr);
2829 iwl4965_rs_rate_init(priv, sta, sta_id);
2830 mutex_unlock(&priv->mutex);
2831
2832 return 0;
2833 }
2834
2835 void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2836 struct ieee80211_channel_switch *ch_switch)
2837 {
2838 struct iwl_priv *priv = hw->priv;
2839 const struct iwl_channel_info *ch_info;
2840 struct ieee80211_conf *conf = &hw->conf;
2841 struct ieee80211_channel *channel = ch_switch->channel;
2842 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2843
2844 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2845 u16 ch;
2846 unsigned long flags = 0;
2847
2848 IWL_DEBUG_MAC80211(priv, "enter\n");
2849
2850 if (iwl_legacy_is_rfkill(priv))
2851 goto out_exit;
2852
2853 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2854 test_bit(STATUS_SCANNING, &priv->status))
2855 goto out_exit;
2856
2857 if (!iwl_legacy_is_associated_ctx(ctx))
2858 goto out_exit;
2859
2860 /* channel switch in progress */
2861 if (priv->switch_rxon.switch_in_progress == true)
2862 goto out_exit;
2863
2864 mutex_lock(&priv->mutex);
2865 if (priv->cfg->ops->lib->set_channel_switch) {
2866
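		/* Only reprogram RXON when the requested channel differs from
		 * the currently active one. */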
2867 ch = channel->hw_value;
2868 if (le16_to_cpu(ctx->active.channel) != ch) {
2869 ch_info = iwl_legacy_get_channel_info(priv,
2870 channel->band,
2871 ch);
2872 if (!iwl_legacy_is_channel_valid(ch_info)) {
2873 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
2874 goto out;
2875 }
2876 spin_lock_irqsave(&priv->lock, flags);
2877
2878 priv->current_ht_config.smps = conf->smps_mode;
2879
2880 /* Configure HT40 channels */
2881 ctx->ht.enabled = conf_is_ht(conf);
2882 if (ctx->ht.enabled) {
2883 if (conf_is_ht40_minus(conf)) {
2884 ctx->ht.extension_chan_offset =
2885 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2886 ctx->ht.is_40mhz = true;
2887 } else if (conf_is_ht40_plus(conf)) {
2888 ctx->ht.extension_chan_offset =
2889 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2890 ctx->ht.is_40mhz = true;
2891 } else {
2892 ctx->ht.extension_chan_offset =
2893 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2894 ctx->ht.is_40mhz = false;
2895 }
2896 } else
2897 ctx->ht.is_40mhz = false;
2898
2899 if ((le16_to_cpu(ctx->staging.channel) != ch))
2900 ctx->staging.flags = 0;
2901
2902 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2903 iwl_legacy_set_rxon_ht(priv, ht_conf);
2904 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2905 ctx->vif);
2906 spin_unlock_irqrestore(&priv->lock, flags);
2907
2908 iwl_legacy_set_rate(priv);
2909 /*
2910 * at this point, staging_rxon has the
2911 * configuration for channel switch
2912 */
2913 if (priv->cfg->ops->lib->set_channel_switch(priv,
2914 ch_switch))
2915 priv->switch_rxon.switch_in_progress = false;
2916 }
2917 }
2918 out:
2919 mutex_unlock(&priv->mutex);
2920 out_exit:
2921 if (!priv->switch_rxon.switch_in_progress)
2922 ieee80211_chswitch_done(ctx->vif, false);
2923 IWL_DEBUG_MAC80211(priv, "leave\n");
2924 }
2925
2926 void iwl4965_configure_filter(struct ieee80211_hw *hw,
2927 unsigned int changed_flags,
2928 unsigned int *total_flags,
2929 u64 multicast)
2930 {
2931 struct iwl_priv *priv = hw->priv;
2932 __le32 filter_or = 0, filter_nand = 0;
2933 struct iwl_rxon_context *ctx;
2934
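/* Sort each mac80211 filter flag into RXON filter bits that must be
 * set (filter_or) or cleared (filter_nand). */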
2935 #define CHK(test, flag) do { \
2936 if (*total_flags & (test)) \
2937 filter_or |= (flag); \
2938 else \
2939 filter_nand |= (flag); \
2940 } while (0)
2941
2942 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2943 changed_flags, *total_flags);
2944
2945 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2946 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
2947 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
2948 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2949
2950 #undef CHK
2951
2952 mutex_lock(&priv->mutex);
2953
2954 for_each_context(priv, ctx) {
2955 ctx->staging.filter_flags &= ~filter_nand;
2956 ctx->staging.filter_flags |= filter_or;
2957
2958 /*
2959 * Not committing directly because hardware can perform a scan,
2960 * but we'll eventually commit the filter flags change anyway.
2961 */
2962 }
2963
2964 mutex_unlock(&priv->mutex);
2965
2966 /*
2967 * Receiving all multicast frames is always enabled by the
2968 * default flags setup in iwl_legacy_connection_init_rx_config()
2969 * since we currently do not support programming multicast
2970 * filters into the device.
2971 */
2972 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2973 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2974 }
2975
2976 /*****************************************************************************
2977 *
2978 * driver setup and teardown
2979 *
2980 *****************************************************************************/
2981
2982 static void iwl4965_bg_txpower_work(struct work_struct *work)
2983 {
2984 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2985 txpower_work);
2986
2987 mutex_lock(&priv->mutex);
2988
2989 /* If a scan happened to start before we got here
2990 * then just return; the statistics notification will
2991 * kick off another scheduled work to compensate for
2992 * any temperature delta we missed here. */
2993 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2994 test_bit(STATUS_SCANNING, &priv->status))
2995 goto out;
2996
2997 	/* Regardless of whether we are associated, we must reconfigure the
2998 * TX power since frames can be sent on non-radar channels while
2999 * not associated */
3000 priv->cfg->ops->lib->send_tx_power(priv);
3001
3002 /* Update last_temperature to keep is_calib_needed from running
3003 * when it isn't needed... */
3004 priv->last_temperature = priv->temperature;
3005 out:
3006 mutex_unlock(&priv->mutex);
3007 }
3008
3009 static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
3010 {
3011 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3012
3013 init_waitqueue_head(&priv->wait_command_queue);
3014
3015 INIT_WORK(&priv->restart, iwl4965_bg_restart);
3016 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
3017 INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
3018 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
3019 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
3020
3021 iwl_legacy_setup_scan_deferred_work(priv);
3022
3023 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
3024
3025 init_timer(&priv->statistics_periodic);
3026 priv->statistics_periodic.data = (unsigned long)priv;
3027 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
3028
3029 init_timer(&priv->ucode_trace);
3030 priv->ucode_trace.data = (unsigned long)priv;
3031 priv->ucode_trace.function = iwl4965_bg_ucode_trace;
3032
3033 init_timer(&priv->watchdog);
3034 priv->watchdog.data = (unsigned long)priv;
3035 priv->watchdog.function = iwl_legacy_bg_watchdog;
3036
3037 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3038 iwl4965_irq_tasklet, (unsigned long)priv);
3039 }
3040
3041 static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
3042 {
3043 cancel_work_sync(&priv->txpower_work);
3044 cancel_delayed_work_sync(&priv->init_alive_start);
3045 cancel_delayed_work(&priv->alive_start);
3046 cancel_work_sync(&priv->run_time_calib_work);
3047
3048 iwl_legacy_cancel_scan_deferred_work(priv);
3049
3050 del_timer_sync(&priv->statistics_periodic);
3051 del_timer_sync(&priv->ucode_trace);
3052 }
3053
3054 static void iwl4965_init_hw_rates(struct iwl_priv *priv,
3055 struct ieee80211_rate *rates)
3056 {
3057 int i;
3058
3059 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
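		/* iwlegacy rate table values are in units of 500 kb/s;
		 * mac80211 bitrates are in units of 100 kb/s, hence the
		 * factor of 5. */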
3060 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
3061 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3062 rates[i].hw_value_short = i;
3063 rates[i].flags = 0;
3064 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3065 /*
3066 * If CCK != 1M then set short preamble rate flag.
3067 */
3068 rates[i].flags |=
3069 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3070 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3071 }
3072 }
3073 }
3074 /*
3075 * Acquire priv->lock before calling this function !
3076 */
3077 void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
3078 {
3079 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
3080 (index & 0xff) | (txq_id << 8));
3081 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
3082 }
3083
3084 void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
3085 struct iwl_tx_queue *txq,
3086 int tx_fifo_id, int scd_retry)
3087 {
3088 int txq_id = txq->q.id;
3089
3090 /* Find out whether to activate Tx queue */
3091 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
3092
3093 /* Set up and activate */
3094 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
3095 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
3096 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
3097 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
3098 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
3099 IWL49_SCD_QUEUE_STTS_REG_MSK);
3100
3101 txq->sched_retry = scd_retry;
3102
3103 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
3104 active ? "Activate" : "Deactivate",
3105 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
3106 }
3107
3108
3109 static int iwl4965_init_drv(struct iwl_priv *priv)
3110 {
3111 int ret;
3112
3113 spin_lock_init(&priv->sta_lock);
3114 spin_lock_init(&priv->hcmd_lock);
3115
3116 INIT_LIST_HEAD(&priv->free_frames);
3117
3118 mutex_init(&priv->mutex);
3119 mutex_init(&priv->sync_cmd_mutex);
3120
3121 priv->ieee_channels = NULL;
3122 priv->ieee_rates = NULL;
3123 priv->band = IEEE80211_BAND_2GHZ;
3124
3125 priv->iw_mode = NL80211_IFTYPE_STATION;
3126 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3127 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3128 priv->_4965.agg_tids_count = 0;
3129
3130 /* initialize force reset */
3131 priv->force_reset[IWL_RF_RESET].reset_duration =
3132 IWL_DELAY_NEXT_FORCE_RF_RESET;
3133 priv->force_reset[IWL_FW_RESET].reset_duration =
3134 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3135
3136 /* Choose which receivers/antennas to use */
3137 if (priv->cfg->ops->hcmd->set_rxon_chain)
3138 priv->cfg->ops->hcmd->set_rxon_chain(priv,
3139 &priv->contexts[IWL_RXON_CTX_BSS]);
3140
3141 iwl_legacy_init_scan_params(priv);
3142
3143 ret = iwl_legacy_init_channel_map(priv);
3144 if (ret) {
3145 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3146 goto err;
3147 }
3148
3149 ret = iwl_legacy_init_geos(priv);
3150 if (ret) {
3151 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3152 goto err_free_channel_map;
3153 }
3154 iwl4965_init_hw_rates(priv, priv->ieee_rates);
3155
3156 return 0;
3157
3158 err_free_channel_map:
3159 iwl_legacy_free_channel_map(priv);
3160 err:
3161 return ret;
3162 }
3163
3164 static void iwl4965_uninit_drv(struct iwl_priv *priv)
3165 {
3166 iwl4965_calib_free_results(priv);
3167 iwl_legacy_free_geos(priv);
3168 iwl_legacy_free_channel_map(priv);
3169 kfree(priv->scan_cmd);
3170 }
3171
3172 static void iwl4965_hw_detect(struct iwl_priv *priv)
3173 {
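	/* Cache the hardware revision, workaround revision and PCI revision
	 * ID for later use and debug output. */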
3174 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
3175 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
3176 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
3177 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3178 }
3179
3180 static int iwl4965_set_hw_params(struct iwl_priv *priv)
3181 {
3182 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3183 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
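	/* RX buffer (page) order depends on whether 8K A-MSDU support was
	 * requested via the mod_params->amsdu_size_8K option. */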
3184 if (priv->cfg->mod_params->amsdu_size_8K)
3185 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3186 else
3187 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3188
3189 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3190
3191 if (priv->cfg->mod_params->disable_11n)
3192 priv->cfg->sku &= ~IWL_SKU_N;
3193
3194 /* Device-specific setup */
3195 return priv->cfg->ops->lib->set_hw_params(priv);
3196 }
3197
3198 static const u8 iwl4965_bss_ac_to_fifo[] = {
3199 IWL_TX_FIFO_VO,
3200 IWL_TX_FIFO_VI,
3201 IWL_TX_FIFO_BE,
3202 IWL_TX_FIFO_BK,
3203 };
3204
3205 static const u8 iwl4965_bss_ac_to_queue[] = {
3206 0, 1, 2, 3,
3207 };
3208
3209 static int
3210 iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3211 {
3212 int err = 0, i;
3213 struct iwl_priv *priv;
3214 struct ieee80211_hw *hw;
3215 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3216 unsigned long flags;
3217 u16 pci_cmd;
3218
3219 /************************
3220 * 1. Allocating HW data
3221 ************************/
3222
3223 hw = iwl_legacy_alloc_all(cfg);
3224 if (!hw) {
3225 err = -ENOMEM;
3226 goto out;
3227 }
3228 priv = hw->priv;
3229 /* At this point both hw and priv are allocated. */
3230
3231 /*
3232 * The default context is always valid,
3233 * more may be discovered when firmware
3234 * is loaded.
3235 */
3236 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3237
3238 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3239 priv->contexts[i].ctxid = i;
3240
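	/* Set up the host command IDs and supported interface modes for the
	 * single BSS context that the 4965 supports. */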
3241 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3242 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3243 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3244 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3245 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3246 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3247 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3248 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3249 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
3250 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
3251 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3252 BIT(NL80211_IFTYPE_ADHOC);
3253 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3254 BIT(NL80211_IFTYPE_STATION);
3255 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3256 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3257 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3258 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3259
3260 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
3261
3262 SET_IEEE80211_DEV(hw, &pdev->dev);
3263
3264 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3265 priv->cfg = cfg;
3266 priv->pci_dev = pdev;
3267 priv->inta_mask = CSR_INI_SET_MASK;
3268
3269 if (iwl_legacy_alloc_traffic_mem(priv))
3270 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3271
3272 /**************************
3273 * 2. Initializing PCI bus
3274 **************************/
3275 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3276 PCIE_LINK_STATE_CLKPM);
3277
3278 if (pci_enable_device(pdev)) {
3279 err = -ENODEV;
3280 goto out_ieee80211_free_hw;
3281 }
3282
3283 pci_set_master(pdev);
3284
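	/* Prefer 36-bit DMA addressing; fall back to 32-bit masks if the
	 * platform cannot provide it. */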
3285 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3286 if (!err)
3287 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3288 if (err) {
3289 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3290 if (!err)
3291 err = pci_set_consistent_dma_mask(pdev,
3292 DMA_BIT_MASK(32));
3293 /* both attempts failed: */
3294 if (err) {
3295 IWL_WARN(priv, "No suitable DMA available.\n");
3296 goto out_pci_disable_device;
3297 }
3298 }
3299
3300 err = pci_request_regions(pdev, DRV_NAME);
3301 if (err)
3302 goto out_pci_disable_device;
3303
3304 pci_set_drvdata(pdev, priv);
3305
3306
3307 /***********************
3308 * 3. Read REV register
3309 ***********************/
3310 priv->hw_base = pci_iomap(pdev, 0, 0);
3311 if (!priv->hw_base) {
3312 err = -ENODEV;
3313 goto out_pci_release_regions;
3314 }
3315
3316 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3317 (unsigned long long) pci_resource_len(pdev, 0));
3318 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3319
3320 	/* these spin locks will be used in apm_ops.init and EEPROM access,
3321 	 * so we should initialize them now
3322 */
3323 spin_lock_init(&priv->reg_lock);
3324 spin_lock_init(&priv->lock);
3325
3326 /*
3327 * stop and reset the on-board processor just in case it is in a
3328 * strange state ... like being left stranded by a primary kernel
3329 * and this is now the kdump kernel trying to start up
3330 */
3331 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3332
3333 iwl4965_hw_detect(priv);
3334 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3335 priv->cfg->name, priv->hw_rev);
3336
3337 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3338 * PCI Tx retries from interfering with C3 CPU state */
3339 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3340
3341 iwl4965_prepare_card_hw(priv);
3342 if (!priv->hw_ready) {
3343 IWL_WARN(priv, "Failed, HW not ready\n");
3344 goto out_iounmap;
3345 }
3346
3347 /*****************
3348 * 4. Read EEPROM
3349 *****************/
3350 /* Read the EEPROM */
3351 err = iwl_legacy_eeprom_init(priv);
3352 if (err) {
3353 IWL_ERR(priv, "Unable to init EEPROM\n");
3354 goto out_iounmap;
3355 }
3356 err = iwl4965_eeprom_check_version(priv);
3357 if (err)
3358 goto out_free_eeprom;
3359
3363 /* extract MAC Address */
3364 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3365 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3366 priv->hw->wiphy->addresses = priv->addresses;
3367 priv->hw->wiphy->n_addresses = 1;
3368
3369 /************************
3370 * 5. Setup HW constants
3371 ************************/
3372 if (iwl4965_set_hw_params(priv)) {
3373 IWL_ERR(priv, "failed to set hw parameters\n");
3374 goto out_free_eeprom;
3375 }
3376
3377 /*******************
3378 * 6. Setup priv
3379 *******************/
3380
3381 err = iwl4965_init_drv(priv);
3382 if (err)
3383 goto out_free_eeprom;
3384 /* At this point both hw and priv are initialized. */
3385
3386 /********************
3387 * 7. Setup services
3388 ********************/
3389 spin_lock_irqsave(&priv->lock, flags);
3390 iwl_legacy_disable_interrupts(priv);
3391 spin_unlock_irqrestore(&priv->lock, flags);
3392
3393 pci_enable_msi(priv->pci_dev);
3394
3395 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3396 IRQF_SHARED, DRV_NAME, priv);
3397 if (err) {
3398 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3399 goto out_disable_msi;
3400 }
3401
3402 iwl4965_setup_deferred_work(priv);
3403 iwl4965_setup_rx_handlers(priv);
3404
3405 /*********************************************
3406 * 8. Enable interrupts and read RFKILL state
3407 *********************************************/
3408
3409 /* enable interrupts if needed: hw bug w/a */
3410 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3411 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3412 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3413 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3414 }
3415
3416 iwl_legacy_enable_interrupts(priv);
3417
3418 /* If platform's RF_KILL switch is NOT set to KILL */
3419 if (iwl_read32(priv, CSR_GP_CNTRL) &
3420 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3421 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3422 else
3423 set_bit(STATUS_RF_KILL_HW, &priv->status);
3424
3425 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3426 test_bit(STATUS_RF_KILL_HW, &priv->status));
3427
3428 iwl_legacy_power_initialize(priv);
3429
3430 init_completion(&priv->_4965.firmware_loading_complete);
3431
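	/* The completion initialized above is signalled by the firmware-loading
	 * path once the request below has run its course;
	 * iwl4965_pci_remove() waits on it before tearing the device down. */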
3432 err = iwl4965_request_firmware(priv, true);
3433 if (err)
3434 goto out_destroy_workqueue;
3435
3436 return 0;
3437
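	/* Error unwind: each label below releases what was acquired up to the
	 * corresponding point of the setup sequence above, in reverse order. */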
3438 out_destroy_workqueue:
3439 destroy_workqueue(priv->workqueue);
3440 priv->workqueue = NULL;
3441 free_irq(priv->pci_dev->irq, priv);
3442 out_disable_msi:
3443 pci_disable_msi(priv->pci_dev);
3444 iwl4965_uninit_drv(priv);
3445 out_free_eeprom:
3446 iwl_legacy_eeprom_free(priv);
3447 out_iounmap:
3448 pci_iounmap(pdev, priv->hw_base);
3449 out_pci_release_regions:
3450 pci_set_drvdata(pdev, NULL);
3451 pci_release_regions(pdev);
3452 out_pci_disable_device:
3453 pci_disable_device(pdev);
3454 out_ieee80211_free_hw:
3455 iwl_legacy_free_traffic_mem(priv);
3456 ieee80211_free_hw(priv->hw);
3457 out:
3458 return err;
3459 }
3460
3461 static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
3462 {
3463 struct iwl_priv *priv = pci_get_drvdata(pdev);
3464 unsigned long flags;
3465
3466 if (!priv)
3467 return;
3468
3469 wait_for_completion(&priv->_4965.firmware_loading_complete);
3470
3471 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3472
3473 iwl_legacy_dbgfs_unregister(priv);
3474 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3475
3476 	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() and
3477 	 * hence iwl4965_down() to be called. Since we are removing the
3478 	 * device, we need to set the STATUS_EXIT_PENDING bit first.
3479 	 */
3480 set_bit(STATUS_EXIT_PENDING, &priv->status);
3481
3482 iwl_legacy_leds_exit(priv);
3483
3484 if (priv->mac80211_registered) {
3485 ieee80211_unregister_hw(priv->hw);
3486 priv->mac80211_registered = 0;
3487 } else {
3488 iwl4965_down(priv);
3489 }
3490
3491 /*
3492 * Make sure device is reset to low power before unloading driver.
3493 * This may be redundant with iwl4965_down(), but there are paths to
3494 * run iwl4965_down() without calling apm_ops.stop(), and there are
3495 * paths to avoid running iwl4965_down() at all before leaving driver.
3496 * This (inexpensive) call *makes sure* device is reset.
3497 */
3498 iwl_legacy_apm_stop(priv);
3499
3500 /* make sure we flush any pending irq or
3501 * tasklet for the driver
3502 */
3503 spin_lock_irqsave(&priv->lock, flags);
3504 iwl_legacy_disable_interrupts(priv);
3505 spin_unlock_irqrestore(&priv->lock, flags);
3506
3507 iwl4965_synchronize_irq(priv);
3508
3509 iwl4965_dealloc_ucode_pci(priv);
3510
3511 if (priv->rxq.bd)
3512 iwl4965_rx_queue_free(priv, &priv->rxq);
3513 iwl4965_hw_txq_ctx_free(priv);
3514
3515 iwl_legacy_eeprom_free(priv);
3516
3517
3518 /*netif_stop_queue(dev); */
3519 flush_workqueue(priv->workqueue);
3520
3521 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3522 * priv->workqueue... so we can't take down the workqueue
3523 * until now... */
3524 destroy_workqueue(priv->workqueue);
3525 priv->workqueue = NULL;
3526 iwl_legacy_free_traffic_mem(priv);
3527
3528 free_irq(priv->pci_dev->irq, priv);
3529 pci_disable_msi(priv->pci_dev);
3530 pci_iounmap(pdev, priv->hw_base);
3531 pci_release_regions(pdev);
3532 pci_disable_device(pdev);
3533 pci_set_drvdata(pdev, NULL);
3534
3535 iwl4965_uninit_drv(priv);
3536
3537 dev_kfree_skb(priv->beacon_skb);
3538
3539 ieee80211_free_hw(priv->hw);
3540 }
3541
3542 /*
3543  * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
3544  * must be called with priv->lock held and MAC access granted
3545 */
3546 void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3547 {
3548 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3549 }
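/*
 * Usage sketch (illustrative only, not taken verbatim from this driver):
 * per the comment above, callers hold priv->lock while updating the
 * scheduler mask, e.g. writing a mask of 0 disables all Tx channels:
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	iwl4965_txq_set_sched(priv, 0);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 */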
3550
3551 /*****************************************************************************
3552 *
3553 * driver and module entry point
3554 *
3555 *****************************************************************************/
3556
3557 /* Hardware specific file defines the PCI IDs table for that hardware module */
3558 static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
3559 #if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
3560 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
3561 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
3562 #endif /* CONFIG_IWL4965 */
3563
3564 {0}
3565 };
3566 MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
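/* Exposing the ID table via MODULE_DEVICE_TABLE lets userspace (udev/modprobe)
 * autoload this module when a matching PCI device (0x4229/0x4230) appears. */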
3567
3568 static struct pci_driver iwl4965_driver = {
3569 .name = DRV_NAME,
3570 .id_table = iwl4965_hw_card_ids,
3571 .probe = iwl4965_pci_probe,
3572 .remove = __devexit_p(iwl4965_pci_remove),
3573 .driver.pm = IWL_LEGACY_PM_OPS,
3574 };
3575
3576 static int __init iwl4965_init(void)
3577 {
3578
3579 int ret;
3580 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3581 pr_info(DRV_COPYRIGHT "\n");
3582
3583 ret = iwl4965_rate_control_register();
3584 if (ret) {
3585 pr_err("Unable to register rate control algorithm: %d\n", ret);
3586 return ret;
3587 }
3588
3589 ret = pci_register_driver(&iwl4965_driver);
3590 if (ret) {
3591 pr_err("Unable to initialize PCI module\n");
3592 goto error_register;
3593 }
3594
3595 return ret;
3596
3597 error_register:
3598 iwl4965_rate_control_unregister();
3599 return ret;
3600 }
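/* Registration order matters: the rate-control algorithm is registered before
 * the PCI driver so it is already available when probe() runs; the error path
 * above (and iwl4965_exit() below) unwinds in the reverse order. */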
3601
3602 static void __exit iwl4965_exit(void)
3603 {
3604 pci_unregister_driver(&iwl4965_driver);
3605 iwl4965_rate_control_unregister();
3606 }
3607
3608 module_exit(iwl4965_exit);
3609 module_init(iwl4965_init);
3610
3611 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3612 module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
3613 MODULE_PARM_DESC(debug, "debug output mask");
3614 #endif
3615
3616 module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
3617 MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
3618 module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
3619 MODULE_PARM_DESC(queues_num, "number of hw queues.");
3620 module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
3621 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3622 module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
3623 int, S_IRUGO);
3624 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3625 module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
3626 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
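/*
 * Example (sketch): the module parameters above can be set at load time, e.g.
 *
 *	modprobe iwl4965 swcrypto=1 11n_disable=1 fw_restart=0
 *
 * or persisted through an options line in a modprobe.d configuration file.
 */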
3627