1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38
39
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION IPW2200_VERSION
80
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
113 #endif
114
115 static struct ieee80211_rate ipw2200_rates[] = {
116 { .bitrate = 10 },
117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 { .bitrate = 60 },
121 { .bitrate = 90 },
122 { .bitrate = 120 },
123 { .bitrate = 180 },
124 { .bitrate = 240 },
125 { .bitrate = 360 },
126 { .bitrate = 480 },
127 { .bitrate = 540 }
128 };
129
130 #define ipw2200_a_rates (ipw2200_rates + 4)
131 #define ipw2200_num_a_rates 8
132 #define ipw2200_bg_rates (ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates 12
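
/* Note: ieee80211_rate.bitrate values are in units of 100 kbps, so the
 * table above lists 1/2/5.5/11 Mbps (CCK) followed by the eight OFDM
 * rates 6-54 Mbps; ipw2200_a_rates skips the four CCK entries. */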
134
135 /* Ugly macro to convert literal channel numbers into their MHz equivalents
136 * There are certainly some conditions that will break this (like feeding it '30')
137 * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 (((x) <= 14) ? \
140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 ((x) + 1000) * 5)
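
/* Worked examples of the macro above: channel 1 -> (1 * 5) + 2407 = 2412 MHz,
 * channel 13 -> 2472 MHz, channel 14 -> 2484 MHz (special-cased),
 * channel 36 -> (36 + 1000) * 5 = 5180 MHz. */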
142
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 QOS_TX3_CW_MIN_OFDM},
153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 QOS_TX3_CW_MAX_OFDM},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 QOS_TX3_CW_MIN_CCK},
164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 QOS_TX3_CW_MAX_CCK},
166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 DEF_TX3_CW_MIN_OFDM},
175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 DEF_TX3_CW_MAX_OFDM},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 DEF_TX3_CW_MIN_CCK},
186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 DEF_TX3_CW_MAX_CCK},
188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
196 static int from_priority_to_tx_queue[] = {
197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
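
/* The table above indexes by 802.1d user priority (0-7) from the QoS
 * control field and yields one of the four hardware TX queues, roughly
 * the WMM access-category grouping. */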
200
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 *qos_param);
207 #endif /* CONFIG_IPW2200_QOS */
208
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 int len, int sync);
218
219 static void ipw_tx_queue_free(struct ipw_priv *);
220
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233
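/* Format one hex-dump line into buf: an 8-digit offset, up to 16 data
 * bytes as hex in two groups of eight, then the same bytes rendered as
 * printable ASCII (non-printable characters shown as '.'). */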
234 static int snprint_line(char *buf, size_t count,
235 const u8 * data, u32 len, u32 ofs)
236 {
237 int out, i, j, l;
238 char c;
239
240 out = snprintf(buf, count, "%08X", ofs);
241
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++)
245 out += snprintf(buf + out, count - out, "%02X ",
246 data[(i * 8 + j)]);
247 for (; j < 8; j++)
248 out += snprintf(buf + out, count - out, " ");
249 }
250
251 out += snprintf(buf + out, count - out, " ");
252 for (l = 0, i = 0; i < 2; i++) {
253 out += snprintf(buf + out, count - out, " ");
254 for (j = 0; j < 8 && l < len; j++, l++) {
255 c = data[(i * 8 + j)];
256 if (!isascii(c) || !isprint(c))
257 c = '.';
258
259 out += snprintf(buf + out, count - out, "%c", c);
260 }
261
262 for (; j < 8; j++)
263 out += snprintf(buf + out, count - out, " ");
264 }
265
266 return out;
267 }
268
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 char line[81];
272 u32 ofs = 0;
273 if (!(ipw_debug_level & level))
274 return;
275
276 while (len) {
277 snprint_line(line, sizeof(line), &data[ofs],
278 min(len, 16U), ofs);
279 printk(KERN_DEBUG "%s\n", line);
280 ofs += 16;
281 len -= min(len, 16U);
282 }
283 }
284
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 size_t out = size;
288 u32 ofs = 0;
289 int total = 0;
290
291 while (size && len) {
292 out = snprint_line(output, size, &data[ofs],
293 min_t(size_t, len, 16U), ofs);
294
295 ofs += 16;
296 output += out;
297 size -= out;
298 len -= min_t(size_t, len, 16U);
299 total += out;
300 }
301 return total;
302 }
303
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 __LINE__, (u32) (b), (u32) (c));
318 _ipw_write_reg8(a, b, c);
319 }
320
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 __LINE__, (u32) (b), (u32) (c));
327 _ipw_write_reg16(a, b, c);
328 }
329
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 __LINE__, (u32) (b), (u32) (c));
336 _ipw_write_reg32(a, b, c);
337 }
338
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 u8 val)
342 {
343 writeb(val, ipw->hw_base + ofs);
344 }
345
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write8(ipw, ofs, val); \
351 } while (0)
352
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 u16 val)
356 {
357 writew(val, ipw->hw_base + ofs);
358 }
359
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write16(ipw, ofs, val); \
365 } while (0)
366
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 u32 val)
370 {
371 writel(val, ipw->hw_base + ofs);
372 }
373
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 __LINE__, (u32)(ofs), (u32)(val)); \
378 _ipw_write32(ipw, ofs, val); \
379 } while (0)
380
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 return readb(ipw->hw_base + ofs);
385 }
386
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 (u32)(ofs)); \
391 _ipw_read8(ipw, ofs); \
392 })
393
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 return readw(ipw->hw_base + ofs);
398 }
399
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 (u32)(ofs)); \
404 _ipw_read16(ipw, ofs); \
405 })
406
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 return readl(ipw->hw_base + ofs);
411 }
412
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 (u32)(ofs)); \
417 _ipw_read32(ipw, ofs); \
418 })
419
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 __LINE__, (u32)(b), (u32)(d)); \
425 _ipw_read_indirect(a, b, c, d); \
426 })
427
428 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 __LINE__, (u32)(b), (u32)(d)); \
434 _ipw_write_indirect(a, b, c, d); \
435 } while (0)
436
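/* The indirect accessors below go through a two-register window: the
 * dword-aligned target address is written to IPW_INDIRECT_ADDR and the
 * data is then transferred through IPW_INDIRECT_DATA.  For bulk
 * transfers, IPW_AUTOINC_ADDR/IPW_AUTOINC_DATA provide the same window
 * with an auto-incrementing address. */
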
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = reg - aligned_addr;
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 u32 word;
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
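/* the aligned dword read returns four bytes; the low two address bits
 * select which byte to return */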
474 return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 u32 value;
481
482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 return value;
488 }
489
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /* for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 int num)
494 {
495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
496 u32 dif_len = addr - aligned_addr;
497 u32 i;
498
499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501 if (num <= 0) {
502 return;
503 }
504
505 /* Read the first dword (or portion) byte by byte */
506 if (unlikely(dif_len)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 /* Start reading at aligned_addr + dif_len */
509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 aligned_addr += 4;
512 }
513
514 /* Read all of the middle dwords as dwords, with auto-increment */
515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519 /* Read the last dword (or portion) byte by byte */
520 if (unlikely(num)) {
521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 for (i = 0; num > 0; i++, num--)
523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 }
525 }
526
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /* for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 int num)
531 {
532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
533 u32 dif_len = addr - aligned_addr;
534 u32 i;
535
536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538 if (num <= 0) {
539 return;
540 }
541
542 /* Write the first dword (or portion) byte by byte */
543 if (unlikely(dif_len)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 /* Start writing at aligned_addr + dif_len */
546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 aligned_addr += 4;
549 }
550
551 /* Write all of the middle dwords as dwords, with auto-increment */
552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556 /* Write the last dword (or portion) byte by byte */
557 if (unlikely(num)) {
558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 for (i = 0; num > 0; i++, num--, buf++)
560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 }
562 }
563
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /* for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 int num)
568 {
569 memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 if (priv->status & STATUS_INT_ENABLED)
587 return;
588 priv->status |= STATUS_INT_ENABLED;
589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 if (!(priv->status & STATUS_INT_ENABLED))
595 return;
596 priv->status &= ~STATUS_INT_ENABLED;
597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 unsigned long flags;
603
604 spin_lock_irqsave(&priv->irq_lock, flags);
605 __ipw_enable_interrupts(priv);
606 spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 unsigned long flags;
612
613 spin_lock_irqsave(&priv->irq_lock, flags);
614 __ipw_disable_interrupts(priv);
615 spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617
618 static char *ipw_error_desc(u32 val)
619 {
620 switch (val) {
621 case IPW_FW_ERROR_OK:
622 return "ERROR_OK";
623 case IPW_FW_ERROR_FAIL:
624 return "ERROR_FAIL";
625 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 return "MEMORY_UNDERFLOW";
627 case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 return "MEMORY_OVERFLOW";
629 case IPW_FW_ERROR_BAD_PARAM:
630 return "BAD_PARAM";
631 case IPW_FW_ERROR_BAD_CHECKSUM:
632 return "BAD_CHECKSUM";
633 case IPW_FW_ERROR_NMI_INTERRUPT:
634 return "NMI_INTERRUPT";
635 case IPW_FW_ERROR_BAD_DATABASE:
636 return "BAD_DATABASE";
637 case IPW_FW_ERROR_ALLOC_FAIL:
638 return "ALLOC_FAIL";
639 case IPW_FW_ERROR_DMA_UNDERRUN:
640 return "DMA_UNDERRUN";
641 case IPW_FW_ERROR_DMA_STATUS:
642 return "DMA_STATUS";
643 case IPW_FW_ERROR_DINO_ERROR:
644 return "DINO_ERROR";
645 case IPW_FW_ERROR_EEPROM_ERROR:
646 return "EEPROM_ERROR";
647 case IPW_FW_ERROR_SYSASSERT:
648 return "SYSASSERT";
649 case IPW_FW_ERROR_FATAL_ERROR:
650 return "FATAL_ERROR";
651 default:
652 return "UNKNOWN_ERROR";
653 }
654 }
655
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 struct ipw_fw_error *error)
658 {
659 u32 i;
660
661 if (!error) {
662 IPW_ERROR("Error allocating and capturing error log. "
663 "Nothing to dump.\n");
664 return;
665 }
666
667 IPW_ERROR("Start IPW Error Log Dump:\n");
668 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 error->status, error->config);
670
671 for (i = 0; i < error->elem_len; i++)
672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
673 ipw_error_desc(error->elem[i].desc),
674 error->elem[i].time,
675 error->elem[i].blink1,
676 error->elem[i].blink2,
677 error->elem[i].link1,
678 error->elem[i].link2, error->elem[i].data);
679 for (i = 0; i < error->log_len; i++)
680 IPW_ERROR("%i\t0x%08x\t%i\n",
681 error->log[i].time,
682 error->log[i].data, error->log[i].event);
683 }
684
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 u32 addr, field_info, field_len, field_count, total_len;
693
694 IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696 if (!priv || !val || !len) {
697 IPW_DEBUG_ORD("Invalid argument\n");
698 return -EINVAL;
699 }
700
701 /* verify device ordinal tables have been initialized */
702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 return -EINVAL;
705 }
706
707 switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 case IPW_ORD_TABLE_0_MASK:
709 /*
710 * TABLE 0: Direct access to a table of 32 bit values
711 *
712 * This is a very simple table with the data directly
713 * read from the table
714 */
715
716 /* remove the table id from the ordinal */
717 ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719 /* boundary check */
720 if (ord > priv->table0_len) {
721 IPW_DEBUG_ORD("ordinal value (%i) longer than "
722 "max (%i)\n", ord, priv->table0_len);
723 return -EINVAL;
724 }
725
726 /* verify we have enough room to store the value */
727 if (*len < sizeof(u32)) {
728 IPW_DEBUG_ORD("ordinal buffer length too small, "
729 "need %zd\n", sizeof(u32));
730 return -EINVAL;
731 }
732
733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 ord, priv->table0_addr + (ord << 2));
735
736 *len = sizeof(u32);
737 ord <<= 2;
738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 break;
740
741 case IPW_ORD_TABLE_1_MASK:
742 /*
743 * TABLE 1: Indirect access to a table of 32 bit values
744 *
745 * This is a fairly large table of u32 values each
746 * representing starting addr for the data (which is
747 * also a u32)
748 */
749
750 /* remove the table id from the ordinal */
751 ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753 /* boundary check */
754 if (ord > priv->table1_len) {
755 IPW_DEBUG_ORD("ordinal value too long\n");
756 return -EINVAL;
757 }
758
759 /* verify we have enough room to store the value */
760 if (*len < sizeof(u32)) {
761 IPW_DEBUG_ORD("ordinal buffer length too small, "
762 "need %zd\n", sizeof(u32));
763 return -EINVAL;
764 }
765
766 *((u32 *) val) =
767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 *len = sizeof(u32);
769 break;
770
771 case IPW_ORD_TABLE_2_MASK:
772 /*
773 * TABLE 2: Indirect access to a table of variable sized values
774 *
775 * This table consists of six values, each containing
776 * - dword containing the starting offset of the data
777 * - dword containing the length in the first 16 bits
778 * and the count in the second 16 bits
779 */
780
781 /* remove the table id from the ordinal */
782 ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784 /* boundary check */
785 if (ord > priv->table2_len) {
786 IPW_DEBUG_ORD("ordinal value too long\n");
787 return -EINVAL;
788 }
789
790 /* get the address of statistic */
791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793 /* get the second DW of statistics ;
794 * two 16-bit words - first is length, second is count */
795 field_info =
796 ipw_read_reg32(priv,
797 priv->table2_addr + (ord << 3) +
798 sizeof(u32));
799
800 /* get each entry length */
801 field_len = *((u16 *) & field_info);
802
803 /* get number of entries */
804 field_count = *(((u16 *) & field_info) + 1);
805
806 /* abort if not enough memory */
807 total_len = field_len * field_count;
808 if (total_len > *len) {
809 *len = total_len;
810 return -EINVAL;
811 }
812
813 *len = total_len;
814 if (!total_len)
815 return 0;
816
817 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 "field_info = 0x%08x\n",
819 addr, total_len, field_info);
820 ipw_read_indirect(priv, addr, val, total_len);
821 break;
822
823 default:
824 IPW_DEBUG_ORD("Invalid ordinal!\n");
825 return -EINVAL;
826
827 }
828
829 return 0;
830 }
831
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 priv->table0_addr, priv->table0_len);
839
840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 priv->table1_addr, priv->table1_len);
845
846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 priv->table2_len &= 0x0000ffff; /* use first two bytes */
849
850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 priv->table2_addr, priv->table2_len);
852
853 }
854
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 reg &= ~IPW_START_STANDBY;
858 if (reg & IPW_GATE_ODMA)
859 reg &= ~IPW_GATE_ODMA;
860 if (reg & IPW_GATE_IDMA)
861 reg &= ~IPW_GATE_IDMA;
862 if (reg & IPW_GATE_ADMA)
863 reg &= ~IPW_GATE_ADMA;
864 return reg;
865 }
866
867 /*
868 * LED behavior:
869 * - On radio ON, turn on any LEDs that are required to be on during start
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 unsigned long flags;
883 u32 led;
884
885 /* If configured to not use LEDs, or nic_type is 1,
886 * then we don't toggle a LINK led */
887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 return;
889
890 spin_lock_irqsave(&priv->lock, flags);
891
892 if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 !(priv->status & STATUS_LED_LINK_ON)) {
894 IPW_DEBUG_LED("Link LED On\n");
895 led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 led |= priv->led_association_on;
897
898 led = ipw_register_toggle(led);
899
900 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903 priv->status |= STATUS_LED_LINK_ON;
904
905 /* If we aren't associated, schedule turning the LED off */
906 if (!(priv->status & STATUS_ASSOCIATED))
907 schedule_delayed_work(&priv->led_link_off,
908 LD_TIME_LINK_ON);
909 }
910
911 spin_unlock_irqrestore(&priv->lock, flags);
912 }
913
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 struct ipw_priv *priv =
917 container_of(work, struct ipw_priv, led_link_on.work);
918 mutex_lock(&priv->mutex);
919 ipw_led_link_on(priv);
920 mutex_unlock(&priv->mutex);
921 }
922
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 unsigned long flags;
926 u32 led;
927
928 /* If configured not to use LEDs, or nic type is 1,
929 * then we don't toggle the LINK led. */
930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 return;
932
933 spin_lock_irqsave(&priv->lock, flags);
934
935 if (priv->status & STATUS_LED_LINK_ON) {
936 led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 led &= priv->led_association_off;
938 led = ipw_register_toggle(led);
939
940 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943 IPW_DEBUG_LED("Link LED Off\n");
944
945 priv->status &= ~STATUS_LED_LINK_ON;
946
947 /* If we aren't associated and the radio is on, schedule
948 * turning the LED on (blink while unassociated) */
949 if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 !(priv->status & STATUS_ASSOCIATED))
951 schedule_delayed_work(&priv->led_link_on,
952 LD_TIME_LINK_OFF);
953
954 }
955
956 spin_unlock_irqrestore(&priv->lock, flags);
957 }
958
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 struct ipw_priv *priv =
962 container_of(work, struct ipw_priv, led_link_off.work);
963 mutex_lock(&priv->mutex);
964 ipw_led_link_off(priv);
965 mutex_unlock(&priv->mutex);
966 }
967
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 u32 led;
971
972 if (priv->config & CFG_NO_LED)
973 return;
974
975 if (priv->status & STATUS_RF_KILL_MASK)
976 return;
977
978 if (!(priv->status & STATUS_LED_ACT_ON)) {
979 led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 led |= priv->led_activity_on;
981
982 led = ipw_register_toggle(led);
983
984 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987 IPW_DEBUG_LED("Activity LED On\n");
988
989 priv->status |= STATUS_LED_ACT_ON;
990
991 cancel_delayed_work(&priv->led_act_off);
992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 } else {
994 /* Reschedule LED off for full time period */
995 cancel_delayed_work(&priv->led_act_off);
996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 }
998 }
999
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 unsigned long flags;
1004 spin_lock_irqsave(&priv->lock, flags);
1005 __ipw_led_activity_on(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif /* 0 */
1009
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 unsigned long flags;
1013 u32 led;
1014
1015 if (priv->config & CFG_NO_LED)
1016 return;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019
1020 if (priv->status & STATUS_LED_ACT_ON) {
1021 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 led &= priv->led_activity_off;
1023
1024 led = ipw_register_toggle(led);
1025
1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029 IPW_DEBUG_LED("Activity LED Off\n");
1030
1031 priv->status &= ~STATUS_LED_ACT_ON;
1032 }
1033
1034 spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 struct ipw_priv *priv =
1040 container_of(work, struct ipw_priv, led_act_off.work);
1041 mutex_lock(&priv->mutex);
1042 ipw_led_activity_off(priv);
1043 mutex_unlock(&priv->mutex);
1044 }
1045
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 unsigned long flags;
1049 u32 led;
1050
1051 /* Only nic type 1 supports mode LEDs */
1052 if (priv->config & CFG_NO_LED ||
1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 return;
1055
1056 spin_lock_irqsave(&priv->lock, flags);
1057
1058 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 if (priv->assoc_network->mode == IEEE_A) {
1060 led |= priv->led_ofdm_on;
1061 led &= priv->led_association_off;
1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 } else if (priv->assoc_network->mode == IEEE_G) {
1064 led |= priv->led_ofdm_on;
1065 led |= priv->led_association_on;
1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 } else {
1068 led &= priv->led_ofdm_off;
1069 led |= priv->led_association_on;
1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 }
1072
1073 led = ipw_register_toggle(led);
1074
1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078 spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 unsigned long flags;
1084 u32 led;
1085
1086 /* Only nic type 1 supports mode LEDs */
1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 return;
1089
1090 spin_lock_irqsave(&priv->lock, flags);
1091
1092 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 led &= priv->led_ofdm_off;
1094 led &= priv->led_association_off;
1095
1096 led = ipw_register_toggle(led);
1097
1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 ipw_led_link_on(priv);
1107 }
1108
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 ipw_led_activity_off(priv);
1112 ipw_led_link_off(priv);
1113 }
1114
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 /* Set the Link Led on for all nic types */
1118 ipw_led_link_on(priv);
1119 }
1120
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 ipw_led_activity_off(priv);
1124 ipw_led_link_off(priv);
1125
1126 if (priv->status & STATUS_RF_KILL_MASK)
1127 ipw_led_radio_off(priv);
1128 }
1129
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134 /* Set the default PINs for the link and activity leds */
1135 priv->led_activity_on = IPW_ACTIVITY_LED;
1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138 priv->led_association_on = IPW_ASSOCIATED_LED;
1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141 /* Set the default PINs for the OFDM leds */
1142 priv->led_ofdm_on = IPW_OFDM_LED;
1143 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145 switch (priv->nic_type) {
1146 case EEPROM_NIC_TYPE_1:
1147 /* In this NIC type, the LEDs are reversed.... */
1148 priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 priv->led_association_on = IPW_ACTIVITY_LED;
1151 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153 if (!(priv->config & CFG_NO_LED))
1154 ipw_led_band_on(priv);
1155
1156 /* And we don't blink link LEDs for this nic, so
1157 * just return here */
1158 return;
1159
1160 case EEPROM_NIC_TYPE_3:
1161 case EEPROM_NIC_TYPE_2:
1162 case EEPROM_NIC_TYPE_4:
1163 case EEPROM_NIC_TYPE_0:
1164 break;
1165
1166 default:
1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 priv->nic_type);
1169 priv->nic_type = EEPROM_NIC_TYPE_0;
1170 break;
1171 }
1172
1173 if (!(priv->config & CFG_NO_LED)) {
1174 if (priv->status & STATUS_ASSOCIATED)
1175 ipw_led_link_on(priv);
1176 else
1177 ipw_led_link_off(priv);
1178 }
1179 }
1180
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 ipw_led_activity_off(priv);
1184 ipw_led_link_off(priv);
1185 ipw_led_band_off(priv);
1186 cancel_delayed_work(&priv->led_link_on);
1187 cancel_delayed_work(&priv->led_link_off);
1188 cancel_delayed_work(&priv->led_act_off);
1189 }
1190
1191 /*
1192 * The following adds a new attribute to the sysfs representation
1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 size_t count)
1205 {
1206 char *p = (char *)buf;
1207 u32 val;
1208
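/* accept "0x..." / "x..." prefixed input as hex, otherwise decimal */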
1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 p++;
1211 if (p[0] == 'x' || p[0] == 'X')
1212 p++;
1213 val = simple_strtoul(p, &p, 16);
1214 } else
1215 val = simple_strtoul(p, &p, 10);
1216 if (p == buf)
1217 printk(KERN_INFO DRV_NAME
1218 ": %s is not in hex or decimal form.\n", buf);
1219 else
1220 ipw_debug_level = val;
1221
1222 return strnlen(buf, count);
1223 }
1224
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 show_debug_level, store_debug_level);
1227
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 /* length = 1st dword in log */
1231 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 u32 log_len, struct ipw_event *log)
1236 {
1237 u32 base;
1238
1239 if (log_len) {
1240 base = ipw_read32(priv, IPW_EVENT_LOG);
1241 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 (u8 *) log, sizeof(*log) * log_len);
1243 }
1244 }
1245
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 struct ipw_fw_error *error;
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 u32 elem_len = ipw_read_reg32(priv, base);
1252
1253 error = kmalloc(sizeof(*error) +
1254 sizeof(*error->elem) * elem_len +
1255 sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 if (!error) {
1257 IPW_ERROR("Memory allocation for firmware error log "
1258 "failed.\n");
1259 return NULL;
1260 }
1261 error->jiffies = jiffies;
1262 error->status = priv->status;
1263 error->config = priv->config;
1264 error->elem_len = elem_len;
1265 error->log_len = log_len;
1266 error->elem = (struct ipw_error_elem *)error->payload;
1267 error->log = (struct ipw_event *)(error->elem + elem_len);
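/* elem and log point into the single allocation above, immediately
 * following the ipw_fw_error header */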
1268
1269 ipw_capture_event_log(priv, log_len, error->log);
1270
1271 if (elem_len)
1272 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 sizeof(*error->elem) * elem_len);
1274
1275 return error;
1276 }
1277
1278 static ssize_t show_event_log(struct device *d,
1279 struct device_attribute *attr, char *buf)
1280 {
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 u32 log_len = ipw_get_event_log_len(priv);
1283 u32 log_size;
1284 struct ipw_event *log;
1285 u32 len = 0, i;
1286
1287 /* not using min() because of its strict type checking */
1288 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 sizeof(*log) * log_len : PAGE_SIZE;
1290 log = kzalloc(log_size, GFP_KERNEL);
1291 if (!log) {
1292 IPW_ERROR("Unable to allocate memory for log\n");
1293 return 0;
1294 }
1295 log_len = log_size / sizeof(*log);
1296 ipw_capture_event_log(priv, log_len, log);
1297
1298 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 for (i = 0; i < log_len; i++)
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X%08X%08X",
1302 log[i].time, log[i].event, log[i].data);
1303 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 kfree(log);
1305 return len;
1306 }
1307
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309
1310 static ssize_t show_error(struct device *d,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct ipw_priv *priv = dev_get_drvdata(d);
1314 u32 len = 0, i;
1315 if (!priv->error)
1316 return 0;
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "%08lX%08X%08X%08X",
1319 priv->error->jiffies,
1320 priv->error->status,
1321 priv->error->config, priv->error->elem_len);
1322 for (i = 0; i < priv->error->elem_len; i++)
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X%08X%08X%08X%08X%08X%08X",
1325 priv->error->elem[i].time,
1326 priv->error->elem[i].desc,
1327 priv->error->elem[i].blink1,
1328 priv->error->elem[i].blink2,
1329 priv->error->elem[i].link1,
1330 priv->error->elem[i].link2,
1331 priv->error->elem[i].data);
1332
1333 len += snprintf(buf + len, PAGE_SIZE - len,
1334 "\n%08X", priv->error->log_len);
1335 for (i = 0; i < priv->error->log_len; i++)
1336 len += snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08X%08X%08X",
1338 priv->error->log[i].time,
1339 priv->error->log[i].event,
1340 priv->error->log[i].data);
1341 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 return len;
1343 }
1344
1345 static ssize_t clear_error(struct device *d,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351 kfree(priv->error);
1352 priv->error = NULL;
1353 return count;
1354 }
1355
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
1358 static ssize_t show_cmd_log(struct device *d,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct ipw_priv *priv = dev_get_drvdata(d);
1362 u32 len = 0, i;
1363 if (!priv->cmdlog)
1364 return 0;
1365 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1367 i = (i + 1) % priv->cmdlog_len) {
1368 len +=
1369 snprintf(buf + len, PAGE_SIZE - len,
1370 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 priv->cmdlog[i].cmd.len);
1373 len +=
1374 snprintk_buf(buf + len, PAGE_SIZE - len,
1375 (u8 *) priv->cmdlog[i].cmd.param,
1376 priv->cmdlog[i].cmd.len);
1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 }
1379 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 return len;
1381 }
1382
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 struct device_attribute *attr,
1390 const char *buf, size_t count)
1391 {
1392 struct ipw_priv *priv = dev_get_drvdata(d);
1393 int rc = 0;
1394
1395 if (count < 1)
1396 return -EINVAL;
1397
1398 switch (buf[0]) {
1399 case '0':
1400 if (!rtap_iface)
1401 return count;
1402
1403 if (netif_running(priv->prom_net_dev)) {
1404 IPW_WARNING("Interface is up. Cannot unregister.\n");
1405 return count;
1406 }
1407
1408 ipw_prom_free(priv);
1409 rtap_iface = 0;
1410 break;
1411
1412 case '1':
1413 if (rtap_iface)
1414 return count;
1415
1416 rc = ipw_prom_alloc(priv);
1417 if (!rc)
1418 rtap_iface = 1;
1419 break;
1420
1421 default:
1422 return -EINVAL;
1423 }
1424
1425 if (rc) {
1426 IPW_ERROR("Failed to register promiscuous network "
1427 "device (error %d).\n", rc);
1428 }
1429
1430 return count;
1431 }
1432
1433 static ssize_t show_rtap_iface(struct device *d,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 if (rtap_iface)
1439 return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 else {
1441 buf[0] = '-';
1442 buf[1] = '1';
1443 buf[2] = '\0';
1444 return 3;
1445 }
1446 }
1447
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 store_rtap_iface);
1450
1451 static ssize_t store_rtap_filter(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457 if (!priv->prom_priv) {
1458 IPW_ERROR("Attempting to set filter without "
1459 "rtap_iface enabled.\n");
1460 return -EPERM;
1461 }
1462
1463 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 BIT_ARG16(priv->prom_priv->filter));
1467
1468 return count;
1469 }
1470
1471 static ssize_t show_rtap_filter(struct device *d,
1472 struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "0x%04X",
1477 priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 store_rtap_filter);
1482 #endif
1483
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 char *buf)
1486 {
1487 struct ipw_priv *priv = dev_get_drvdata(d);
1488 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 const char *buf, size_t count)
1493 {
1494 struct ipw_priv *priv = dev_get_drvdata(d);
1495 struct net_device *dev = priv->net_dev;
1496 char buffer[] = "00000000";
1497 unsigned long len =
1498 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 unsigned long val;
1500 char *p = buffer;
1501
1502 IPW_DEBUG_INFO("enter\n");
1503
1504 strncpy(buffer, buf, len);
1505 buffer[len] = 0;
1506
1507 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 p++;
1509 if (p[0] == 'x' || p[0] == 'X')
1510 p++;
1511 val = simple_strtoul(p, &p, 16);
1512 } else
1513 val = simple_strtoul(p, &p, 10);
1514 if (p == buffer) {
1515 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 } else {
1517 priv->ieee->scan_age = val;
1518 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 }
1520
1521 IPW_DEBUG_INFO("exit\n");
1522 return len;
1523 }
1524
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 char *buf)
1529 {
1530 struct ipw_priv *priv = dev_get_drvdata(d);
1531 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536 {
1537 struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539 IPW_DEBUG_INFO("enter\n");
1540
1541 if (count == 0)
1542 return 0;
1543
1544 if (*buf == 0) {
1545 IPW_DEBUG_LED("Disabling LED control.\n");
1546 priv->config |= CFG_NO_LED;
1547 ipw_led_shutdown(priv);
1548 } else {
1549 IPW_DEBUG_LED("Enabling LED control.\n");
1550 priv->config &= ~CFG_NO_LED;
1551 ipw_led_init(priv);
1552 }
1553
1554 IPW_DEBUG_INFO("exit\n");
1555 return count;
1556 }
1557
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560 static ssize_t show_status(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562 {
1563 struct ipw_priv *p = dev_get_drvdata(d);
1564 return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 char *buf)
1571 {
1572 struct ipw_priv *p = dev_get_drvdata(d);
1573 return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578 static ssize_t show_nic_type(struct device *d,
1579 struct device_attribute *attr, char *buf)
1580 {
1581 struct ipw_priv *priv = dev_get_drvdata(d);
1582 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587 static ssize_t show_ucode_version(struct device *d,
1588 struct device_attribute *attr, char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 u32 len = sizeof(u32), tmp = 0;
1605 struct ipw_priv *p = dev_get_drvdata(d);
1606
1607 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 return 0;
1609
1610 return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615 /*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 int n = p->eeprom_delay;
1624 return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 struct device_attribute *attr,
1628 const char *buf, size_t count)
1629 {
1630 struct ipw_priv *p = dev_get_drvdata(d);
1631 sscanf(buf, "%i", &p->eeprom_delay);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 show_eeprom_delay, store_eeprom_delay);
1637
1638 static ssize_t show_command_event_reg(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 u32 reg;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 sscanf(buf, "%x", &reg);
1655 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 return strnlen(buf, count);
1657 }
1658
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 show_command_event_reg, store_command_event_reg);
1661
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 struct device_attribute *attr, char *buf)
1664 {
1665 u32 reg = 0;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 reg = ipw_read_reg32(p, 0x301100);
1669 return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 struct device_attribute *attr,
1673 const char *buf, size_t count)
1674 {
1675 u32 reg;
1676 struct ipw_priv *p = dev_get_drvdata(d);
1677
1678 sscanf(buf, "%x", &reg);
1679 ipw_write_reg32(p, 0x301100, reg);
1680 return strnlen(buf, count);
1681 }
1682
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686 static ssize_t show_indirect_dword(struct device *d,
1687 struct device_attribute *attr, char *buf)
1688 {
1689 u32 reg = 0;
1690 struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692 if (priv->status & STATUS_INDIRECT_DWORD)
1693 reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 else
1695 reg = 0;
1696
1697 return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 struct device_attribute *attr,
1701 const char *buf, size_t count)
1702 {
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 sscanf(buf, "%x", &priv->indirect_dword);
1706 priv->status |= STATUS_INDIRECT_DWORD;
1707 return strnlen(buf, count);
1708 }
1709
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 show_indirect_dword, store_indirect_dword);
1712
1713 static ssize_t show_indirect_byte(struct device *d,
1714 struct device_attribute *attr, char *buf)
1715 {
1716 u8 reg = 0;
1717 struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719 if (priv->status & STATUS_INDIRECT_BYTE)
1720 reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 else
1722 reg = 0;
1723
1724 return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 struct device_attribute *attr,
1728 const char *buf, size_t count)
1729 {
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 sscanf(buf, "%x", &priv->indirect_byte);
1733 priv->status |= STATUS_INDIRECT_BYTE;
1734 return strnlen(buf, count);
1735 }
1736
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 show_indirect_byte, store_indirect_byte);
1739
1740 static ssize_t show_direct_dword(struct device *d,
1741 struct device_attribute *attr, char *buf)
1742 {
1743 u32 reg = 0;
1744 struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746 if (priv->status & STATUS_DIRECT_DWORD)
1747 reg = ipw_read32(priv, priv->direct_dword);
1748 else
1749 reg = 0;
1750
1751 return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759 sscanf(buf, "%x", &priv->direct_dword);
1760 priv->status |= STATUS_DIRECT_DWORD;
1761 return strnlen(buf, count);
1762 }
1763
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 show_direct_dword, store_direct_dword);
1766
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 priv->status |= STATUS_RF_KILL_HW;
1771 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 } else {
1773 priv->status &= ~STATUS_RF_KILL_HW;
1774 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 }
1776
1777 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 char *buf)
1782 {
1783 /* 0 - RF kill not enabled
1784 1 - SW based RF kill active (sysfs)
1785 2 - HW based RF kill active
1786 3 - Both HW and SW based RF kill active */
1787 struct ipw_priv *priv = dev_get_drvdata(d);
1788 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 (rf_kill_active(priv) ? 0x2 : 0x0);
1790 return sprintf(buf, "%i\n", val);
1791 }
1792
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 if ((disable_radio ? 1 : 0) ==
1796 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 return 0;
1798
1799 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1800 disable_radio ? "OFF" : "ON");
1801
1802 if (disable_radio) {
1803 priv->status |= STATUS_RF_KILL_SW;
1804
1805 cancel_delayed_work(&priv->request_scan);
1806 cancel_delayed_work(&priv->request_direct_scan);
1807 cancel_delayed_work(&priv->request_passive_scan);
1808 cancel_delayed_work(&priv->scan_event);
1809 schedule_work(&priv->down);
1810 } else {
1811 priv->status &= ~STATUS_RF_KILL_SW;
1812 if (rf_kill_active(priv)) {
1813 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 "disabled by HW switch\n");
1815 /* Make sure the RF_KILL check timer is running */
1816 cancel_delayed_work(&priv->rf_kill);
1817 schedule_delayed_work(&priv->rf_kill,
1818 round_jiffies_relative(2 * HZ));
1819 } else
1820 schedule_work(&priv->up);
1821 }
1822
1823 return 1;
1824 }
1825
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 const char *buf, size_t count)
1828 {
1829 struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831 ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833 return count;
1834 }
1835
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
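
/*
 * Illustrative usage (not driver code): the rf_kill attribute above can be
 * exercised from userspace; the sysfs path below is only an example and
 * depends on the PCI address of the adapter.
 *
 *	# cat /sys/bus/pci/drivers/ipw2200/0000:02:02.0/rf_kill
 *	0				<- radio enabled
 *	# echo 1 > /sys/bus/pci/drivers/ipw2200/0000:02:02.0/rf_kill
 *	# cat /sys/bus/pci/drivers/ipw2200/0000:02:02.0/rf_kill
 *	1				<- SW RF kill; 2/3 mean the HW switch is set
 */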
1837
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int pos = 0, len = 0;
1843 if (priv->config & CFG_SPEED_SCAN) {
1844 while (priv->speed_scan[pos] != 0)
1845 len += sprintf(&buf[len], "%d ",
1846 priv->speed_scan[pos++]);
1847 return len + sprintf(&buf[len], "\n");
1848 }
1849
1850 return sprintf(buf, "0\n");
1851 }
1852
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 const char *buf, size_t count)
1855 {
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 int channel, pos = 0;
1858 const char *p = buf;
1859
1860 /* list of space separated channels to scan, optionally ending with 0 */
1861 while ((channel = simple_strtol(p, NULL, 0))) {
1862 if (pos == MAX_SPEED_SCAN - 1) {
1863 priv->speed_scan[pos] = 0;
1864 break;
1865 }
1866
1867 if (libipw_is_valid_channel(priv->ieee, channel))
1868 priv->speed_scan[pos++] = channel;
1869 else
1870 IPW_WARNING("Skipping invalid channel request: %d\n",
1871 channel);
1872 p = strchr(p, ' ');
1873 if (!p)
1874 break;
1875 while (*p == ' ' || *p == '\t')
1876 p++;
1877 }
1878
1879 if (pos == 0)
1880 priv->config &= ~CFG_SPEED_SCAN;
1881 else {
1882 priv->speed_scan_pos = 0;
1883 priv->config |= CFG_SPEED_SCAN;
1884 }
1885
1886 return count;
1887 }
1888
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 store_speed_scan);
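
/*
 * Illustrative usage (not driver code): speed_scan takes a space separated
 * channel list, optionally terminated by a 0; the sysfs path is an example.
 *
 *	# echo "1 6 11 0" > /sys/bus/pci/drivers/ipw2200/<device>/speed_scan
 *
 * Invalid channels are skipped with a warning, and writing "0" (or an empty
 * list) clears CFG_SPEED_SCAN again.
 */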
1891
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 char *buf)
1894 {
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct ipw_priv *priv = dev_get_drvdata(d);
1903 if (buf[0] == '1')
1904 priv->config |= CFG_NET_STATS;
1905 else
1906 priv->config &= ~CFG_NET_STATS;
1907
1908 return count;
1909 }
1910
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 show_net_stats, store_net_stats);
1913
1914 static ssize_t show_channels(struct device *d,
1915 struct device_attribute *attr,
1916 char *buf)
1917 {
1918 struct ipw_priv *priv = dev_get_drvdata(d);
1919 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 int len = 0, i;
1921
1922 len = sprintf(&buf[len],
1923 "Displaying %d channels in 2.4GHz band "
1924 "(802.11bg):\n", geo->bg_channels);
1925
1926 for (i = 0; i < geo->bg_channels; i++) {
1927 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 geo->bg[i].channel,
1929 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 " (radar spectrum)" : "",
1931 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 ? "" : ", IBSS",
1934 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 "passive only" : "active/passive",
1936 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 "B" : "B/G");
1938 }
1939
1940 len += sprintf(&buf[len],
1941 "Displaying %d channels in 5.2GHz band "
1942 "(802.11a):\n", geo->a_channels);
1943 for (i = 0; i < geo->a_channels; i++) {
1944 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 geo->a[i].channel,
1946 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 " (radar spectrum)" : "",
1948 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 ? "" : ", IBSS",
1951 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 "passive only" : "active/passive");
1953 }
1954
1955 return len;
1956 }
1957
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 union iwreq_data wrqu;
1963 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 if (priv->status & STATUS_ASSOCIATED)
1965 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 else
1967 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1968 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970
1971 static void ipw_irq_tasklet(struct ipw_priv *priv)
1972 {
1973 u32 inta, inta_mask, handled = 0;
1974 unsigned long flags;
1975 int rc = 0;
1976
1977 spin_lock_irqsave(&priv->irq_lock, flags);
1978
1979 inta = ipw_read32(priv, IPW_INTA_RW);
1980 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981
1982 if (inta == 0xFFFFFFFF) {
1983 /* Hardware disappeared */
1984 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985 /* Only handle the cached INTA values */
1986 inta = 0;
1987 }
1988 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989
1990 /* Add any cached INTA values that need to be handled */
1991 inta |= priv->isr_inta;
1992
1993 spin_unlock_irqrestore(&priv->irq_lock, flags);
1994
1995 spin_lock_irqsave(&priv->lock, flags);
1996
1997 /* handle all the justifications for the interrupt */
1998 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999 ipw_rx(priv);
2000 handled |= IPW_INTA_BIT_RX_TRANSFER;
2001 }
2002
2003 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004 IPW_DEBUG_HC("Command completed.\n");
2005 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006 priv->status &= ~STATUS_HCMD_ACTIVE;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009 }
2010
2011 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012 IPW_DEBUG_TX("TX_QUEUE_1\n");
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015 }
2016
2017 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018 IPW_DEBUG_TX("TX_QUEUE_2\n");
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024 IPW_DEBUG_TX("TX_QUEUE_3\n");
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027 }
2028
2029 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030 IPW_DEBUG_TX("TX_QUEUE_4\n");
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033 }
2034
2035 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036 IPW_WARNING("STATUS_CHANGE\n");
2037 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2042 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043 }
2044
2045 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046 IPW_WARNING("HOST_CMD_DONE\n");
2047 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048 }
2049
2050 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053 }
2054
2055 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056 IPW_WARNING("PHY_OFF_DONE\n");
2057 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058 }
2059
2060 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062 priv->status |= STATUS_RF_KILL_HW;
2063 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064 wake_up_interruptible(&priv->wait_command_queue);
2065 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066 cancel_delayed_work(&priv->request_scan);
2067 cancel_delayed_work(&priv->request_direct_scan);
2068 cancel_delayed_work(&priv->request_passive_scan);
2069 cancel_delayed_work(&priv->scan_event);
2070 schedule_work(&priv->link_down);
2071 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073 }
2074
2075 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076 IPW_WARNING("Firmware error detected. Restarting.\n");
2077 if (priv->error) {
2078 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080 struct ipw_fw_error *error =
2081 ipw_alloc_error_log(priv);
2082 ipw_dump_error_log(priv, error);
2083 kfree(error);
2084 }
2085 } else {
2086 priv->error = ipw_alloc_error_log(priv);
2087 if (priv->error)
2088 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089 else
2090 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091 "log.\n");
2092 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093 ipw_dump_error_log(priv, priv->error);
2094 }
2095
2096 /* XXX: If hardware encryption is for WPA/WPA2,
2097 * we have to notify the supplicant. */
2098 if (priv->ieee->sec.encrypt) {
2099 priv->status &= ~STATUS_ASSOCIATED;
2100 notify_wx_assoc_event(priv);
2101 }
2102
2103 /* Keep the restart process from trying to send host
2104 * commands by clearing the INIT status bit */
2105 priv->status &= ~STATUS_INIT;
2106
2107 /* Cancel currently queued command. */
2108 priv->status &= ~STATUS_HCMD_ACTIVE;
2109 wake_up_interruptible(&priv->wait_command_queue);
2110
2111 schedule_work(&priv->adapter_restart);
2112 handled |= IPW_INTA_BIT_FATAL_ERROR;
2113 }
2114
2115 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116 IPW_ERROR("Parity error\n");
2117 handled |= IPW_INTA_BIT_PARITY_ERROR;
2118 }
2119
2120 if (handled != inta) {
2121 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122 }
2123
2124 spin_unlock_irqrestore(&priv->lock, flags);
2125
2126 /* enable all interrupts */
2127 ipw_enable_interrupts(priv);
2128 }
2129
2130 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
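/*
 * For reference: IPW_CMD() token-pastes and stringizes its argument, so
 * IPW_CMD(SSID) expands to 'case IPW_CMD_SSID: return "SSID";', which keeps
 * the table below free of duplicated names.
 */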
2131 static char *get_cmd_string(u8 cmd)
2132 {
2133 switch (cmd) {
2134 IPW_CMD(HOST_COMPLETE);
2135 IPW_CMD(POWER_DOWN);
2136 IPW_CMD(SYSTEM_CONFIG);
2137 IPW_CMD(MULTICAST_ADDRESS);
2138 IPW_CMD(SSID);
2139 IPW_CMD(ADAPTER_ADDRESS);
2140 IPW_CMD(PORT_TYPE);
2141 IPW_CMD(RTS_THRESHOLD);
2142 IPW_CMD(FRAG_THRESHOLD);
2143 IPW_CMD(POWER_MODE);
2144 IPW_CMD(WEP_KEY);
2145 IPW_CMD(TGI_TX_KEY);
2146 IPW_CMD(SCAN_REQUEST);
2147 IPW_CMD(SCAN_REQUEST_EXT);
2148 IPW_CMD(ASSOCIATE);
2149 IPW_CMD(SUPPORTED_RATES);
2150 IPW_CMD(SCAN_ABORT);
2151 IPW_CMD(TX_FLUSH);
2152 IPW_CMD(QOS_PARAMETERS);
2153 IPW_CMD(DINO_CONFIG);
2154 IPW_CMD(RSN_CAPABILITIES);
2155 IPW_CMD(RX_KEY);
2156 IPW_CMD(CARD_DISABLE);
2157 IPW_CMD(SEED_NUMBER);
2158 IPW_CMD(TX_POWER);
2159 IPW_CMD(COUNTRY_INFO);
2160 IPW_CMD(AIRONET_INFO);
2161 IPW_CMD(AP_TX_POWER);
2162 IPW_CMD(CCKM_INFO);
2163 IPW_CMD(CCX_VER_INFO);
2164 IPW_CMD(SET_CALIBRATION);
2165 IPW_CMD(SENSITIVITY_CALIB);
2166 IPW_CMD(RETRY_LIMIT);
2167 IPW_CMD(IPW_PRE_POWER_DOWN);
2168 IPW_CMD(VAP_BEACON_TEMPLATE);
2169 IPW_CMD(VAP_DTIM_PERIOD);
2170 IPW_CMD(EXT_SUPPORTED_RATES);
2171 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172 IPW_CMD(VAP_QUIET_INTERVALS);
2173 IPW_CMD(VAP_CHANNEL_SWITCH);
2174 IPW_CMD(VAP_MANDATORY_CHANNELS);
2175 IPW_CMD(VAP_CELL_PWR_LIMIT);
2176 IPW_CMD(VAP_CF_PARAM_SET);
2177 IPW_CMD(VAP_SET_BEACONING_STATE);
2178 IPW_CMD(MEASUREMENT);
2179 IPW_CMD(POWER_CAPABILITY);
2180 IPW_CMD(SUPPORTED_CHANNELS);
2181 IPW_CMD(TPC_REPORT);
2182 IPW_CMD(WME_INFO);
2183 IPW_CMD(PRODUCTION_COMMAND);
2184 default:
2185 return "UNKNOWN";
2186 }
2187 }
2188
2189 #define HOST_COMPLETE_TIMEOUT HZ
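/*
 * HZ jiffies is one second, so __ipw_send_cmd() below waits roughly one
 * second for the firmware to clear STATUS_HCMD_ACTIVE before reporting a
 * command timeout.
 */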
2190
2191 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192 {
2193 int rc = 0;
2194 unsigned long flags;
2195 unsigned long now, end;
2196
2197 spin_lock_irqsave(&priv->lock, flags);
2198 if (priv->status & STATUS_HCMD_ACTIVE) {
2199 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200 get_cmd_string(cmd->cmd));
2201 spin_unlock_irqrestore(&priv->lock, flags);
2202 return -EAGAIN;
2203 }
2204
2205 priv->status |= STATUS_HCMD_ACTIVE;
2206
2207 if (priv->cmdlog) {
2208 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212 cmd->len);
2213 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214 }
2215
2216 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218 priv->status);
2219
2220 #ifndef DEBUG_CMD_WEP_KEY
2221 if (cmd->cmd == IPW_CMD_WEP_KEY)
2222 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2223 else
2224 #endif
2225 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226
2227 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228 if (rc) {
2229 priv->status &= ~STATUS_HCMD_ACTIVE;
2230 IPW_ERROR("Failed to send %s: Reason %d\n",
2231 get_cmd_string(cmd->cmd), rc);
2232 spin_unlock_irqrestore(&priv->lock, flags);
2233 goto exit;
2234 }
2235 spin_unlock_irqrestore(&priv->lock, flags);
2236
2237 now = jiffies;
2238 end = now + HOST_COMPLETE_TIMEOUT;
2239 again:
2240 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241 !(priv->
2242 status & STATUS_HCMD_ACTIVE),
2243 end - now);
2244 if (rc < 0) {
2245 now = jiffies;
2246 if (time_before(now, end))
2247 goto again;
2248 rc = 0;
2249 }
2250
2251 if (rc == 0) {
2252 spin_lock_irqsave(&priv->lock, flags);
2253 if (priv->status & STATUS_HCMD_ACTIVE) {
2254 IPW_ERROR("Failed to send %s: Command timed out.\n",
2255 get_cmd_string(cmd->cmd));
2256 priv->status &= ~STATUS_HCMD_ACTIVE;
2257 spin_unlock_irqrestore(&priv->lock, flags);
2258 rc = -EIO;
2259 goto exit;
2260 }
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262 } else
2263 rc = 0;
2264
2265 if (priv->status & STATUS_RF_KILL_HW) {
2266 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267 get_cmd_string(cmd->cmd));
2268 rc = -EIO;
2269 goto exit;
2270 }
2271
2272 exit:
2273 if (priv->cmdlog) {
2274 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275 priv->cmdlog_pos %= priv->cmdlog_len;
2276 }
2277 return rc;
2278 }
2279
2280 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281 {
2282 struct host_cmd cmd = {
2283 .cmd = command,
2284 };
2285
2286 return __ipw_send_cmd(priv, &cmd);
2287 }
2288
2289 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290 void *data)
2291 {
2292 struct host_cmd cmd = {
2293 .cmd = command,
2294 .len = len,
2295 .param = data,
2296 };
2297
2298 return __ipw_send_cmd(priv, &cmd);
2299 }
2300
2301 static int ipw_send_host_complete(struct ipw_priv *priv)
2302 {
2303 if (!priv) {
2304 IPW_ERROR("Invalid args\n");
2305 return -1;
2306 }
2307
2308 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309 }
2310
2311 static int ipw_send_system_config(struct ipw_priv *priv)
2312 {
2313 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314 sizeof(priv->sys_config),
2315 &priv->sys_config);
2316 }
2317
2318 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319 {
2320 if (!priv || !ssid) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326 ssid);
2327 }
2328
2329 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330 {
2331 if (!priv || !mac) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2334 }
2335
2336 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337 priv->net_dev->name, mac);
2338
2339 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340 }
2341
2342 static void ipw_adapter_restart(void *adapter)
2343 {
2344 struct ipw_priv *priv = adapter;
2345
2346 if (priv->status & STATUS_RF_KILL_MASK)
2347 return;
2348
2349 ipw_down(priv);
2350
2351 if (priv->assoc_network &&
2352 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353 ipw_remove_current_network(priv);
2354
2355 if (ipw_up(priv)) {
2356 IPW_ERROR("Failed to up device\n");
2357 return;
2358 }
2359 }
2360
2361 static void ipw_bg_adapter_restart(struct work_struct *work)
2362 {
2363 struct ipw_priv *priv =
2364 container_of(work, struct ipw_priv, adapter_restart);
2365 mutex_lock(&priv->mutex);
2366 ipw_adapter_restart(priv);
2367 mutex_unlock(&priv->mutex);
2368 }
2369
2370 static void ipw_abort_scan(struct ipw_priv *priv);
2371
2372 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2373
2374 static void ipw_scan_check(void *data)
2375 {
2376 struct ipw_priv *priv = data;
2377
2378 if (priv->status & STATUS_SCAN_ABORTING) {
2379 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380 "adapter after (%dms).\n",
2381 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382 schedule_work(&priv->adapter_restart);
2383 } else if (priv->status & STATUS_SCANNING) {
2384 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385 "after (%dms).\n",
2386 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387 ipw_abort_scan(priv);
2388 schedule_delayed_work(&priv->scan_check, HZ);
2389 }
2390 }
2391
2392 static void ipw_bg_scan_check(struct work_struct *work)
2393 {
2394 struct ipw_priv *priv =
2395 container_of(work, struct ipw_priv, scan_check.work);
2396 mutex_lock(&priv->mutex);
2397 ipw_scan_check(priv);
2398 mutex_unlock(&priv->mutex);
2399 }
2400
2401 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402 struct ipw_scan_request_ext *request)
2403 {
2404 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405 sizeof(*request), request);
2406 }
2407
2408 static int ipw_send_scan_abort(struct ipw_priv *priv)
2409 {
2410 if (!priv) {
2411 IPW_ERROR("Invalid args\n");
2412 return -1;
2413 }
2414
2415 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416 }
2417
2418 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419 {
2420 struct ipw_sensitivity_calib calib = {
2421 .beacon_rssi_raw = cpu_to_le16(sens),
2422 };
2423
2424 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425 &calib);
2426 }
2427
2428 static int ipw_send_associate(struct ipw_priv *priv,
2429 struct ipw_associate *associate)
2430 {
2431 if (!priv || !associate) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437 associate);
2438 }
2439
2440 static int ipw_send_supported_rates(struct ipw_priv *priv,
2441 struct ipw_supported_rates *rates)
2442 {
2443 if (!priv || !rates) {
2444 IPW_ERROR("Invalid args\n");
2445 return -1;
2446 }
2447
2448 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449 rates);
2450 }
2451
2452 static int ipw_set_random_seed(struct ipw_priv *priv)
2453 {
2454 u32 val;
2455
2456 if (!priv) {
2457 IPW_ERROR("Invalid args\n");
2458 return -1;
2459 }
2460
2461 get_random_bytes(&val, sizeof(val));
2462
2463 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464 }
2465
2466 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467 {
2468 __le32 v = cpu_to_le32(phy_off);
2469 if (!priv) {
2470 IPW_ERROR("Invalid args\n");
2471 return -1;
2472 }
2473
2474 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475 }
2476
2477 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478 {
2479 if (!priv || !power) {
2480 IPW_ERROR("Invalid args\n");
2481 return -1;
2482 }
2483
2484 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485 }
2486
2487 static int ipw_set_tx_power(struct ipw_priv *priv)
2488 {
2489 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490 struct ipw_tx_power tx_power;
2491 s8 max_power;
2492 int i;
2493
2494 memset(&tx_power, 0, sizeof(tx_power));
2495
2496 /* configure device for 'G' band */
2497 tx_power.ieee_mode = IPW_G_MODE;
2498 tx_power.num_channels = geo->bg_channels;
2499 for (i = 0; i < geo->bg_channels; i++) {
2500 max_power = geo->bg[i].max_power;
2501 tx_power.channels_tx_power[i].channel_number =
2502 geo->bg[i].channel;
2503 tx_power.channels_tx_power[i].tx_power = max_power ?
2504 min(max_power, priv->tx_power) : priv->tx_power;
2505 }
2506 if (ipw_send_tx_power(priv, &tx_power))
2507 return -EIO;
2508
2509 /* configure device to also handle 'B' band */
2510 tx_power.ieee_mode = IPW_B_MODE;
2511 if (ipw_send_tx_power(priv, &tx_power))
2512 return -EIO;
2513
2514 /* configure device to also handle 'A' band */
2515 if (priv->ieee->abg_true) {
2516 tx_power.ieee_mode = IPW_A_MODE;
2517 tx_power.num_channels = geo->a_channels;
2518 for (i = 0; i < tx_power.num_channels; i++) {
2519 max_power = geo->a[i].max_power;
2520 tx_power.channels_tx_power[i].channel_number =
2521 geo->a[i].channel;
2522 tx_power.channels_tx_power[i].tx_power = max_power ?
2523 min(max_power, priv->tx_power) : priv->tx_power;
2524 }
2525 if (ipw_send_tx_power(priv, &tx_power))
2526 return -EIO;
2527 }
2528 return 0;
2529 }
2530
2531 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532 {
2533 struct ipw_rts_threshold rts_threshold = {
2534 .rts_threshold = cpu_to_le16(rts),
2535 };
2536
2537 if (!priv) {
2538 IPW_ERROR("Invalid args\n");
2539 return -1;
2540 }
2541
2542 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543 sizeof(rts_threshold), &rts_threshold);
2544 }
2545
2546 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547 {
2548 struct ipw_frag_threshold frag_threshold = {
2549 .frag_threshold = cpu_to_le16(frag),
2550 };
2551
2552 if (!priv) {
2553 IPW_ERROR("Invalid args\n");
2554 return -1;
2555 }
2556
2557 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558 sizeof(frag_threshold), &frag_threshold);
2559 }
2560
2561 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562 {
2563 __le32 param;
2564
2565 if (!priv) {
2566 IPW_ERROR("Invalid args\n");
2567 return -1;
2568 }
2569
2570 /* If on battery, set to 3, if AC set to CAM, else user
2571 * level */
2572 switch (mode) {
2573 case IPW_POWER_BATTERY:
2574 param = cpu_to_le32(IPW_POWER_INDEX_3);
2575 break;
2576 case IPW_POWER_AC:
2577 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578 break;
2579 default:
2580 param = cpu_to_le32(mode);
2581 break;
2582 }
2583
2584 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585 &param);
2586 }
2587
2588 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589 {
2590 struct ipw_retry_limit retry_limit = {
2591 .short_retry_limit = slimit,
2592 .long_retry_limit = llimit
2593 };
2594
2595 if (!priv) {
2596 IPW_ERROR("Invalid args\n");
2597 return -1;
2598 }
2599
2600 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601 &retry_limit);
2602 }
2603
2604 /*
2605 * The IPW device contains a Microwire compatible EEPROM that stores
2606 * various data like the MAC address. Usually the firmware has exclusive
2607 * access to the eeprom, but during device initialization (before the
2608 * device driver has sent the HostComplete command to the firmware) the
2609 * device driver has read access to the EEPROM by way of indirect addressing
2610 * through a couple of memory mapped registers.
2611 *
2612 * The following is a simplified implementation for pulling data out of
2613 * the eeprom, along with some helper functions to find information in
2614 * the per device private data's copy of the eeprom.
2615 *
2616 * NOTE: To better understand how these functions work (i.e. what is a chip
2617 * select and why do we have to keep driving the eeprom clock?), read
2618 * just about any data sheet for a Microwire compatible EEPROM.
2619 */
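
/*
 * Worked example (illustrative, assuming EEPROM_CMD_READ is the standard
 * Microwire READ opcode, binary 10): eeprom_read_u16(priv, 0x21) with the
 * helpers below boils down to
 *
 *	eeprom_cs(priv);		-- assert chip select
 *	eeprom_write_bit(priv, 1);	-- start bit
 *	eeprom_write_bit(priv, 1);	-- opcode bit 1
 *	eeprom_write_bit(priv, 0);	-- opcode bit 0
 *	(8 address bits of 0x21, MSB first, via eeprom_write_bit)
 *	(16 clock pulses, sampling EEPROM_BIT_DO after each one)
 *	eeprom_disable_cs(priv);
 *
 * Each register write is followed by udelay(p->eeprom_delay), which is why
 * eeprom_delay is exposed as a sysfs attribute earlier in this file.
 */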
2620
2621 /* write a 32 bit value into the indirect accessor register */
2622 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623 {
2624 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625
2626 /* the eeprom requires some time to complete the operation */
2627 udelay(p->eeprom_delay);
2628 }
2629
2630 /* perform a chip select operation */
2631 static void eeprom_cs(struct ipw_priv *priv)
2632 {
2633 eeprom_write_reg(priv, 0);
2634 eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636 eeprom_write_reg(priv, EEPROM_BIT_CS);
2637 }
2638
2639 /* perform a chip select operation */
2640 static void eeprom_disable_cs(struct ipw_priv *priv)
2641 {
2642 eeprom_write_reg(priv, EEPROM_BIT_CS);
2643 eeprom_write_reg(priv, 0);
2644 eeprom_write_reg(priv, EEPROM_BIT_SK);
2645 }
2646
2647 /* push a single bit down to the eeprom */
2648 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649 {
2650 int d = (bit ? EEPROM_BIT_DI : 0);
2651 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653 }
2654
2655 /* push an opcode followed by an address down to the eeprom */
2656 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657 {
2658 int i;
2659
2660 eeprom_cs(priv);
2661 eeprom_write_bit(priv, 1);
2662 eeprom_write_bit(priv, op & 2);
2663 eeprom_write_bit(priv, op & 1);
2664 for (i = 7; i >= 0; i--) {
2665 eeprom_write_bit(priv, addr & (1 << i));
2666 }
2667 }
2668
2669 /* pull 16 bits off the eeprom, one bit at a time */
2670 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671 {
2672 int i;
2673 u16 r = 0;
2674
2675 /* Send READ Opcode */
2676 eeprom_op(priv, EEPROM_CMD_READ, addr);
2677
2678 /* Send dummy bit */
2679 eeprom_write_reg(priv, EEPROM_BIT_CS);
2680
2681 /* Read the byte off the eeprom one bit at a time */
2682 for (i = 0; i < 16; i++) {
2683 u32 data = 0;
2684 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685 eeprom_write_reg(priv, EEPROM_BIT_CS);
2686 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688 }
2689
2690 /* Send another dummy bit */
2691 eeprom_write_reg(priv, 0);
2692 eeprom_disable_cs(priv);
2693
2694 return r;
2695 }
2696
2697 /* helper function for pulling the mac address out of the private */
2698 /* data's copy of the eeprom data */
2699 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700 {
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2702 }
2703
2704 /*
2705 * Either the device driver (i.e. the host) or the firmware can
2706 * load eeprom data into the designated region in SRAM. If neither
2707 * happens then the FW will shutdown with a fatal error.
2708 *
2709 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2710 * region of shared SRAM needs to be set to a non-zero value.
2711 */
2712 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2713 {
2714 int i;
2715 __le16 *eeprom = (__le16 *) priv->eeprom;
2716
2717 IPW_DEBUG_TRACE(">>\n");
2718
2719 /* read entire contents of eeprom into private buffer */
2720 for (i = 0; i < 128; i++)
2721 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2722
2723 /*
2724 If the data looks correct, then copy it to our private
2725 copy. Otherwise let the firmware know to perform the operation
2726 on its own.
2727 */
2728 if (priv->eeprom[EEPROM_VERSION] != 0) {
2729 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2730
2731 /* write the eeprom data to sram */
2732 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2733 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2734
2735 /* Do not load eeprom data on fatal error or suspend */
2736 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2737 } else {
2738 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2739
2740 /* Load eeprom data on fatal error or suspend */
2741 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2742 }
2743
2744 IPW_DEBUG_TRACE("<<\n");
2745 }
2746
2747 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2748 {
2749 count >>= 2;
2750 if (!count)
2751 return;
2752 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2753 while (count--)
2754 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2755 }
2756
2757 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2758 {
2759 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2760 CB_NUMBER_OF_ELEMENTS_SMALL *
2761 sizeof(struct command_block));
2762 }
2763
2764 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2765 { /* start dma engine but no transfers yet */
2766
2767 IPW_DEBUG_FW(">> :\n");
2768
2769 /* Start the dma */
2770 ipw_fw_dma_reset_command_blocks(priv);
2771
2772 /* Write CB base address */
2773 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2774
2775 IPW_DEBUG_FW("<< :\n");
2776 return 0;
2777 }
2778
2779 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2780 {
2781 u32 control = 0;
2782
2783 IPW_DEBUG_FW(">> :\n");
2784
2785 /* set the Stop and Abort bit */
2786 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2787 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2788 priv->sram_desc.last_cb_index = 0;
2789
2790 IPW_DEBUG_FW("<<\n");
2791 }
2792
2793 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2794 struct command_block *cb)
2795 {
2796 u32 address =
2797 IPW_SHARED_SRAM_DMA_CONTROL +
2798 (sizeof(struct command_block) * index);
2799 IPW_DEBUG_FW(">> :\n");
2800
2801 ipw_write_indirect(priv, address, (u8 *) cb,
2802 (int)sizeof(struct command_block));
2803
2804 IPW_DEBUG_FW("<< :\n");
2805 return 0;
2806
2807 }
2808
2809 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2810 {
2811 u32 control = 0;
2812 u32 index = 0;
2813
2814 IPW_DEBUG_FW(">> :\n");
2815
2816 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2817 ipw_fw_dma_write_command_block(priv, index,
2818 &priv->sram_desc.cb_list[index]);
2819
2820 /* Enable the DMA in the CSR register */
2821 ipw_clear_bit(priv, IPW_RESET_REG,
2822 IPW_RESET_REG_MASTER_DISABLED |
2823 IPW_RESET_REG_STOP_MASTER);
2824
2825 /* Set the Start bit. */
2826 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2827 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2828
2829 IPW_DEBUG_FW("<< :\n");
2830 return 0;
2831 }
2832
2833 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2834 {
2835 u32 address;
2836 u32 register_value = 0;
2837 u32 cb_fields_address = 0;
2838
2839 IPW_DEBUG_FW(">> :\n");
2840 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2841 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2842
2843 /* Read the DMA Control register */
2844 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2845 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2846
2847 /* Print the CB values */
2848 cb_fields_address = address;
2849 register_value = ipw_read_reg32(priv, cb_fields_address);
2850 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2851
2852 cb_fields_address += sizeof(u32);
2853 register_value = ipw_read_reg32(priv, cb_fields_address);
2854 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2855
2856 cb_fields_address += sizeof(u32);
2857 register_value = ipw_read_reg32(priv, cb_fields_address);
2858 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2859 register_value);
2860
2861 cb_fields_address += sizeof(u32);
2862 register_value = ipw_read_reg32(priv, cb_fields_address);
2863 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2864
2865 IPW_DEBUG_FW("<< :\n");
2866 }
2867
2868 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2869 {
2870 u32 current_cb_address = 0;
2871 u32 current_cb_index = 0;
2872
2873 IPW_DEBUG_FW(">> :\n");
2874 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2875
2876 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2877 sizeof(struct command_block);
2878
2879 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2880 current_cb_index, current_cb_address);
2881
2882 IPW_DEBUG_FW("<< :\n");
2883 return current_cb_index;
2884
2885 }
2886
2887 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2888 u32 src_address,
2889 u32 dest_address,
2890 u32 length,
2891 int interrupt_enabled, int is_last)
2892 {
2893
2894 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2895 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2896 CB_DEST_SIZE_LONG;
2897 struct command_block *cb;
2898 u32 last_cb_element = 0;
2899
2900 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2901 src_address, dest_address, length);
2902
2903 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2904 return -1;
2905
2906 last_cb_element = priv->sram_desc.last_cb_index;
2907 cb = &priv->sram_desc.cb_list[last_cb_element];
2908 priv->sram_desc.last_cb_index++;
2909
2910 /* Calculate the new CB control word */
2911 if (interrupt_enabled)
2912 control |= CB_INT_ENABLED;
2913
2914 if (is_last)
2915 control |= CB_LAST_VALID;
2916
2917 control |= length;
2918
2919 /* Calculate the CB Element's checksum value */
2920 cb->status = control ^ src_address ^ dest_address;
2921
2922 /* Copy the Source and Destination addresses */
2923 cb->dest_addr = dest_address;
2924 cb->source_addr = src_address;
2925
2926 /* Copy the Control Word last */
2927 cb->control = control;
2928
2929 return 0;
2930 }
2931
2932 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2933 int nr, u32 dest_address, u32 len)
2934 {
2935 int ret, i;
2936 u32 size;
2937
2938 IPW_DEBUG_FW(">>\n");
2939 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2940 nr, dest_address, len);
2941
2942 for (i = 0; i < nr; i++) {
2943 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2944 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2945 dest_address +
2946 i * CB_MAX_LENGTH, size,
2947 0, 0);
2948 if (ret) {
2949 IPW_DEBUG_FW_INFO(": Failed\n");
2950 return -1;
2951 } else
2952 IPW_DEBUG_FW_INFO(": Added new cb\n");
2953 }
2954
2955 IPW_DEBUG_FW("<<\n");
2956 return 0;
2957 }
2958
2959 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2960 {
2961 u32 current_index = 0, previous_index;
2962 u32 watchdog = 0;
2963
2964 IPW_DEBUG_FW(">> :\n");
2965
2966 current_index = ipw_fw_dma_command_block_index(priv);
2967 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2968 (int)priv->sram_desc.last_cb_index);
2969
2970 while (current_index < priv->sram_desc.last_cb_index) {
2971 udelay(50);
2972 previous_index = current_index;
2973 current_index = ipw_fw_dma_command_block_index(priv);
2974
2975 if (previous_index < current_index) {
2976 watchdog = 0;
2977 continue;
2978 }
2979 if (++watchdog > 400) {
2980 IPW_DEBUG_FW_INFO("Timeout\n");
2981 ipw_fw_dma_dump_command_block(priv);
2982 ipw_fw_dma_abort(priv);
2983 return -1;
2984 }
2985 }
2986
2987 ipw_fw_dma_abort(priv);
2988
2989 /*Disable the DMA in the CSR register */
2990 ipw_set_bit(priv, IPW_RESET_REG,
2991 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2992
2993 IPW_DEBUG_FW("<< dmaWaitSync\n");
2994 return 0;
2995 }
2996
2997 static void ipw_remove_current_network(struct ipw_priv *priv)
2998 {
2999 struct list_head *element, *safe;
3000 struct libipw_network *network = NULL;
3001 unsigned long flags;
3002
3003 spin_lock_irqsave(&priv->ieee->lock, flags);
3004 list_for_each_safe(element, safe, &priv->ieee->network_list) {
3005 network = list_entry(element, struct libipw_network, list);
3006 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
3007 list_del(element);
3008 list_add_tail(&network->list,
3009 &priv->ieee->network_free_list);
3010 }
3011 }
3012 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3013 }
3014
3015 /**
3016 * Check that card is still alive.
3017 * Reads debug register from domain0.
3018 * If card is present, pre-defined value should
3019 * be found there.
3020 *
3021 * @param priv
3022 * @return 1 if card is present, 0 otherwise
3023 */
3024 static inline int ipw_alive(struct ipw_priv *priv)
3025 {
3026 return ipw_read32(priv, 0x90) == 0xd55555d5;
3027 }
3028
3029 /* timeout in msec, attempted in 10-msec quanta */
3030 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3031 int timeout)
3032 {
3033 int i = 0;
3034
3035 do {
3036 if ((ipw_read32(priv, addr) & mask) == mask)
3037 return i;
3038 mdelay(10);
3039 i += 10;
3040 } while (i < timeout);
3041
3042 return -ETIME;
3043 }
3044
3045 /* These functions load the firmware and micro code for the operation of
3046 * the ipw hardware. They assume the buffer holds the complete
3047 * image and that the caller handles memory allocation and clean up.
3048 */
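
/*
 * Roughly, the load sequence implemented by ipw_load() further below is:
 *
 *	ipw_stop_nic() / ipw_reset_nic()	-- quiesce the device
 *	ipw_load_firmware(priv, boot_img, ...)	-- DMA the boot image
 *	ipw_start_nic(), wait for IPW_INTA_BIT_FW_INITIALIZATION_DONE
 *	ipw_load_ucode(priv, ucode_img, ...)	-- feed DINO the microcode
 *	ipw_stop_nic()
 *	ipw_load_firmware(priv, fw_img, ...)	-- DMA the runtime firmware
 *	ipw_queue_reset() / ipw_start_nic()	-- bring the queues back up
 */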
3049
3050 static int ipw_stop_master(struct ipw_priv *priv)
3051 {
3052 int rc;
3053
3054 IPW_DEBUG_TRACE(">>\n");
3055 /* stop master. typical delay - 0 */
3056 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3057
3058 /* timeout is in msec, polled in 10-msec quanta */
3059 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3060 IPW_RESET_REG_MASTER_DISABLED, 100);
3061 if (rc < 0) {
3062 IPW_ERROR("wait for stop master failed after 100ms\n");
3063 return -1;
3064 }
3065
3066 IPW_DEBUG_INFO("stop master %dms\n", rc);
3067
3068 return rc;
3069 }
3070
3071 static void ipw_arc_release(struct ipw_priv *priv)
3072 {
3073 IPW_DEBUG_TRACE(">>\n");
3074 mdelay(5);
3075
3076 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3077
3078 /* the exact timing is not documented; add some delay to be safe */
3079 mdelay(5);
3080 }
3081
3082 struct fw_chunk {
3083 __le32 address;
3084 __le32 length;
3085 };
3086
3087 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3088 {
3089 int rc = 0, i, addr;
3090 u8 cr = 0;
3091 __le16 *image;
3092
3093 image = (__le16 *) data;
3094
3095 IPW_DEBUG_TRACE(">>\n");
3096
3097 rc = ipw_stop_master(priv);
3098
3099 if (rc < 0)
3100 return rc;
3101
3102 for (addr = IPW_SHARED_LOWER_BOUND;
3103 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3104 ipw_write32(priv, addr, 0);
3105 }
3106
3107 /* no ucode (yet) */
3108 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3109 /* destroy DMA queues */
3110 /* reset sequence */
3111
3112 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3113 ipw_arc_release(priv);
3114 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3115 mdelay(1);
3116
3117 /* reset PHY */
3118 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3119 mdelay(1);
3120
3121 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3122 mdelay(1);
3123
3124 /* enable ucode store */
3125 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3126 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3127 mdelay(1);
3128
3129 /* write ucode */
3130 /**
3131 * @bug
3132 * Do NOT set indirect address register once and then
3133 * store data to indirect data register in the loop.
3134 * It seems very reasonable, but in this case DINO does not
3135 * accept the ucode. It is essential to set the address each time.
3136 */
3137 /* load new ipw uCode */
3138 for (i = 0; i < len / 2; i++)
3139 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3140 le16_to_cpu(image[i]));
3141
3142 /* enable DINO */
3143 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3144 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3145
3146 /* this is where the igx / win driver deviates from the VAP driver. */
3147
3148 /* wait for alive response */
3149 for (i = 0; i < 100; i++) {
3150 /* poll for incoming data */
3151 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3152 if (cr & DINO_RXFIFO_DATA)
3153 break;
3154 mdelay(1);
3155 }
3156
3157 if (cr & DINO_RXFIFO_DATA) {
3158 /* alive_command_response size is NOT a multiple of 4 */
3159 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3160
3161 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3162 response_buffer[i] =
3163 cpu_to_le32(ipw_read_reg32(priv,
3164 IPW_BASEBAND_RX_FIFO_READ));
3165 memcpy(&priv->dino_alive, response_buffer,
3166 sizeof(priv->dino_alive));
3167 if (priv->dino_alive.alive_command == 1
3168 && priv->dino_alive.ucode_valid == 1) {
3169 rc = 0;
3170 IPW_DEBUG_INFO
3171 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3172 "of %02d/%02d/%02d %02d:%02d\n",
3173 priv->dino_alive.software_revision,
3174 priv->dino_alive.software_revision,
3175 priv->dino_alive.device_identifier,
3176 priv->dino_alive.device_identifier,
3177 priv->dino_alive.time_stamp[0],
3178 priv->dino_alive.time_stamp[1],
3179 priv->dino_alive.time_stamp[2],
3180 priv->dino_alive.time_stamp[3],
3181 priv->dino_alive.time_stamp[4]);
3182 } else {
3183 IPW_DEBUG_INFO("Microcode is not alive\n");
3184 rc = -EINVAL;
3185 }
3186 } else {
3187 IPW_DEBUG_INFO("No alive response from DINO\n");
3188 rc = -ETIME;
3189 }
3190
3191 /* disable DINO, otherwise for some reason
3192 the firmware has trouble getting the alive response. */
3193 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3194
3195 return rc;
3196 }
3197
3198 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3199 {
3200 int ret = -1;
3201 int offset = 0;
3202 struct fw_chunk *chunk;
3203 int total_nr = 0;
3204 int i;
3205 struct pci_pool *pool;
3206 void **virts;
3207 dma_addr_t *phys;
3208
3209 IPW_DEBUG_TRACE(">> :\n");
3210
3211 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3212 GFP_KERNEL);
3213 if (!virts)
3214 return -ENOMEM;
3215
3216 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3217 GFP_KERNEL);
3218 if (!phys) {
3219 kfree(virts);
3220 return -ENOMEM;
3221 }
3222 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3223 if (!pool) {
3224 IPW_ERROR("pci_pool_create failed\n");
3225 kfree(phys);
3226 kfree(virts);
3227 return -ENOMEM;
3228 }
3229
3230 /* Start the Dma */
3231 ret = ipw_fw_dma_enable(priv);
3232
3233 /* If the DMA engine were already active here, that would be a bug. */
3234 BUG_ON(priv->sram_desc.last_cb_index > 0);
3235
3236 do {
3237 u32 chunk_len;
3238 u8 *start;
3239 int size;
3240 int nr = 0;
3241
3242 chunk = (struct fw_chunk *)(data + offset);
3243 offset += sizeof(struct fw_chunk);
3244 chunk_len = le32_to_cpu(chunk->length);
3245 start = data + offset;
3246
3247 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3248 for (i = 0; i < nr; i++) {
3249 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3250 &phys[total_nr]);
3251 if (!virts[total_nr]) {
3252 ret = -ENOMEM;
3253 goto out;
3254 }
3255 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3256 CB_MAX_LENGTH);
3257 memcpy(virts[total_nr], start, size);
3258 start += size;
3259 total_nr++;
3260 /* We don't support fw chunk larger than 64*8K */
3261 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3262 }
3263
3264 /* build DMA packet and queue up for sending */
3265 /* dma to chunk->address, the chunk->length bytes from data +
3266 * offset */
3267 /* Dma loading */
3268 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3269 nr, le32_to_cpu(chunk->address),
3270 chunk_len);
3271 if (ret) {
3272 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3273 goto out;
3274 }
3275
3276 offset += chunk_len;
3277 } while (offset < len);
3278
3279 /* Run the DMA and wait for the answer */
3280 ret = ipw_fw_dma_kick(priv);
3281 if (ret) {
3282 IPW_ERROR("dmaKick Failed\n");
3283 goto out;
3284 }
3285
3286 ret = ipw_fw_dma_wait(priv);
3287 if (ret) {
3288 IPW_ERROR("dmaWaitSync Failed\n");
3289 goto out;
3290 }
3291 out:
3292 for (i = 0; i < total_nr; i++)
3293 pci_pool_free(pool, virts[i], phys[i]);
3294
3295 pci_pool_destroy(pool);
3296 kfree(phys);
3297 kfree(virts);
3298
3299 return ret;
3300 }
3301
3302 /* stop nic */
3303 static int ipw_stop_nic(struct ipw_priv *priv)
3304 {
3305 int rc = 0;
3306
3307 /* stop */
3308 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3309
3310 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3311 IPW_RESET_REG_MASTER_DISABLED, 500);
3312 if (rc < 0) {
3313 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3314 return rc;
3315 }
3316
3317 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3318
3319 return rc;
3320 }
3321
3322 static void ipw_start_nic(struct ipw_priv *priv)
3323 {
3324 IPW_DEBUG_TRACE(">>\n");
3325
3326 /* prvHwStartNic release ARC */
3327 ipw_clear_bit(priv, IPW_RESET_REG,
3328 IPW_RESET_REG_MASTER_DISABLED |
3329 IPW_RESET_REG_STOP_MASTER |
3330 CBD_RESET_REG_PRINCETON_RESET);
3331
3332 /* enable power management */
3333 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3334 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3335
3336 IPW_DEBUG_TRACE("<<\n");
3337 }
3338
3339 static int ipw_init_nic(struct ipw_priv *priv)
3340 {
3341 int rc;
3342
3343 IPW_DEBUG_TRACE(">>\n");
3344 /* reset */
3345 /*prvHwInitNic */
3346 /* set "initialization complete" bit to move adapter to D0 state */
3347 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3348
3349 /* low-level PLL activation */
3350 ipw_write32(priv, IPW_READ_INT_REGISTER,
3351 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3352
3353 /* wait for clock stabilization */
3354 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3355 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3356 if (rc < 0)
3357 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3358
3359 /* assert SW reset */
3360 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3361
3362 udelay(10);
3363
3364 /* set "initialization complete" bit to move adapter to D0 state */
3365 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3366
3367 IPW_DEBUG_TRACE("<<\n");
3368 return 0;
3369 }
3370
3371 /* Call this function from process context, it will sleep in request_firmware.
3372 * Probe is an ok place to call this from.
3373 */
3374 static int ipw_reset_nic(struct ipw_priv *priv)
3375 {
3376 int rc = 0;
3377 unsigned long flags;
3378
3379 IPW_DEBUG_TRACE(">>\n");
3380
3381 rc = ipw_init_nic(priv);
3382
3383 spin_lock_irqsave(&priv->lock, flags);
3384 /* Clear the 'host command active' bit... */
3385 priv->status &= ~STATUS_HCMD_ACTIVE;
3386 wake_up_interruptible(&priv->wait_command_queue);
3387 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3388 wake_up_interruptible(&priv->wait_state);
3389 spin_unlock_irqrestore(&priv->lock, flags);
3390
3391 IPW_DEBUG_TRACE("<<\n");
3392 return rc;
3393 }
3394
3395
3396 struct ipw_fw {
3397 __le32 ver;
3398 __le32 boot_size;
3399 __le32 ucode_size;
3400 __le32 fw_size;
3401 u8 data[0];
3402 };
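
/*
 * The combined firmware image is this header followed by the three blobs
 * back to back; ipw_load() below slices it up as
 *
 *	boot_img  = &fw->data[0];
 *	ucode_img = &fw->data[boot_size];
 *	fw_img    = &fw->data[boot_size + ucode_size];
 *
 * with the sizes taken from the little-endian header fields above.
 */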
3403
3404 static int ipw_get_fw(struct ipw_priv *priv,
3405 const struct firmware **raw, const char *name)
3406 {
3407 struct ipw_fw *fw;
3408 int rc;
3409
3410 /* ask firmware_class module to get the boot firmware off disk */
3411 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3412 if (rc < 0) {
3413 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3414 return rc;
3415 }
3416
3417 if ((*raw)->size < sizeof(*fw)) {
3418 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3419 return -EINVAL;
3420 }
3421
3422 fw = (void *)(*raw)->data;
3423
3424 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3425 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3426 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3427 name, (*raw)->size);
3428 return -EINVAL;
3429 }
3430
3431 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3432 name,
3433 le32_to_cpu(fw->ver) >> 16,
3434 le32_to_cpu(fw->ver) & 0xff,
3435 (*raw)->size - sizeof(*fw));
3436 return 0;
3437 }
3438
3439 #define IPW_RX_BUF_SIZE (3000)
3440
3441 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3442 struct ipw_rx_queue *rxq)
3443 {
3444 unsigned long flags;
3445 int i;
3446
3447 spin_lock_irqsave(&rxq->lock, flags);
3448
3449 INIT_LIST_HEAD(&rxq->rx_free);
3450 INIT_LIST_HEAD(&rxq->rx_used);
3451
3452 /* Fill the rx_used queue with _all_ of the Rx buffers */
3453 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3454 /* In the reset function, these buffers may have been allocated
3455 * to an SKB, so we need to unmap and free potential storage */
3456 if (rxq->pool[i].skb != NULL) {
3457 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3458 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3459 dev_kfree_skb(rxq->pool[i].skb);
3460 rxq->pool[i].skb = NULL;
3461 }
3462 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3463 }
3464
3465 /* Set us so that we have processed and used all buffers, but have
3466 * not restocked the Rx queue with fresh buffers */
3467 rxq->read = rxq->write = 0;
3468 rxq->free_count = 0;
3469 spin_unlock_irqrestore(&rxq->lock, flags);
3470 }
3471
3472 #ifdef CONFIG_PM
3473 static int fw_loaded = 0;
3474 static const struct firmware *raw = NULL;
3475
3476 static void free_firmware(void)
3477 {
3478 if (fw_loaded) {
3479 release_firmware(raw);
3480 raw = NULL;
3481 fw_loaded = 0;
3482 }
3483 }
3484 #else
3485 #define free_firmware() do {} while (0)
3486 #endif
3487
3488 static int ipw_load(struct ipw_priv *priv)
3489 {
3490 #ifndef CONFIG_PM
3491 const struct firmware *raw = NULL;
3492 #endif
3493 struct ipw_fw *fw;
3494 u8 *boot_img, *ucode_img, *fw_img;
3495 u8 *name = NULL;
3496 int rc = 0, retries = 3;
3497
3498 switch (priv->ieee->iw_mode) {
3499 case IW_MODE_ADHOC:
3500 name = "ipw2200-ibss.fw";
3501 break;
3502 #ifdef CONFIG_IPW2200_MONITOR
3503 case IW_MODE_MONITOR:
3504 name = "ipw2200-sniffer.fw";
3505 break;
3506 #endif
3507 case IW_MODE_INFRA:
3508 name = "ipw2200-bss.fw";
3509 break;
3510 }
3511
3512 if (!name) {
3513 rc = -EINVAL;
3514 goto error;
3515 }
3516
3517 #ifdef CONFIG_PM
3518 if (!fw_loaded) {
3519 #endif
3520 rc = ipw_get_fw(priv, &raw, name);
3521 if (rc < 0)
3522 goto error;
3523 #ifdef CONFIG_PM
3524 }
3525 #endif
3526
3527 fw = (void *)raw->data;
3528 boot_img = &fw->data[0];
3529 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3530 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3531 le32_to_cpu(fw->ucode_size)];
3532
3533 if (rc < 0)
3534 goto error;
3535
3536 if (!priv->rxq)
3537 priv->rxq = ipw_rx_queue_alloc(priv);
3538 else
3539 ipw_rx_queue_reset(priv, priv->rxq);
3540 if (!priv->rxq) {
3541 IPW_ERROR("Unable to initialize Rx queue\n");
3542 goto error;
3543 }
3544
3545 retry:
3546 /* Ensure interrupts are disabled */
3547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3548 priv->status &= ~STATUS_INT_ENABLED;
3549
3550 /* ack pending interrupts */
3551 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3552
3553 ipw_stop_nic(priv);
3554
3555 rc = ipw_reset_nic(priv);
3556 if (rc < 0) {
3557 IPW_ERROR("Unable to reset NIC\n");
3558 goto error;
3559 }
3560
3561 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3562 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3563
3564 /* DMA the initial boot firmware into the device */
3565 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3566 if (rc < 0) {
3567 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3568 goto error;
3569 }
3570
3571 /* kick start the device */
3572 ipw_start_nic(priv);
3573
3574 /* wait for the device to finish its initial startup sequence */
3575 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3576 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3577 if (rc < 0) {
3578 IPW_ERROR("device failed to boot initial fw image\n");
3579 goto error;
3580 }
3581 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3582
3583 /* ack fw init done interrupt */
3584 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3585
3586 /* DMA the ucode into the device */
3587 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3588 if (rc < 0) {
3589 IPW_ERROR("Unable to load ucode: %d\n", rc);
3590 goto error;
3591 }
3592
3593 /* stop nic */
3594 ipw_stop_nic(priv);
3595
3596 /* DMA bss firmware into the device */
3597 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3598 if (rc < 0) {
3599 IPW_ERROR("Unable to load firmware: %d\n", rc);
3600 goto error;
3601 }
3602 #ifdef CONFIG_PM
3603 fw_loaded = 1;
3604 #endif
3605
3606 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3607
3608 rc = ipw_queue_reset(priv);
3609 if (rc < 0) {
3610 IPW_ERROR("Unable to initialize queues\n");
3611 goto error;
3612 }
3613
3614 /* Ensure interrupts are disabled */
3615 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3616 /* ack pending interrupts */
3617 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3618
3619 /* kick start the device */
3620 ipw_start_nic(priv);
3621
3622 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3623 if (retries > 0) {
3624 IPW_WARNING("Parity error. Retrying init.\n");
3625 retries--;
3626 goto retry;
3627 }
3628
3629 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3630 rc = -EIO;
3631 goto error;
3632 }
3633
3634 /* wait for the device */
3635 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3636 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3637 if (rc < 0) {
3638 IPW_ERROR("device failed to start within 500ms\n");
3639 goto error;
3640 }
3641 IPW_DEBUG_INFO("device response after %dms\n", rc);
3642
3643 /* ack fw init done interrupt */
3644 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3645
3646 /* read eeprom data and initialize the eeprom region of sram */
3647 priv->eeprom_delay = 1;
3648 ipw_eeprom_init_sram(priv);
3649
3650 /* enable interrupts */
3651 ipw_enable_interrupts(priv);
3652
3653 /* Ensure our queue has valid packets */
3654 ipw_rx_queue_replenish(priv);
3655
3656 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3657
3658 /* ack pending interrupts */
3659 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3660
3661 #ifndef CONFIG_PM
3662 release_firmware(raw);
3663 #endif
3664 return 0;
3665
3666 error:
3667 if (priv->rxq) {
3668 ipw_rx_queue_free(priv, priv->rxq);
3669 priv->rxq = NULL;
3670 }
3671 ipw_tx_queue_free(priv);
3672 if (raw)
3673 release_firmware(raw);
3674 #ifdef CONFIG_PM
3675 fw_loaded = 0;
3676 raw = NULL;
3677 #endif
3678
3679 return rc;
3680 }
3681
3682 /**
3683 * DMA services
3684 *
3685 * Theory of operation
3686 *
3687 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3688 * Two empty entries are always kept in the buffer to protect from overflow.
3689 *
3690 * For the Tx queue, there are low mark and high mark limits. If, after queuing
3691 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
3692 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
3693 * > high mark, the Tx queue is resumed.
3694 *
3695 * The IPW operates with six queues, one receive queue in the device's
3696 * sram, one transmit queue for sending commands to the device firmware,
3697 * and four transmit queues for data.
3698 *
3699 * The four transmit queues allow for performing quality of service (qos)
3700 * transmissions as per the 802.11 protocol. Currently Linux does not
3701 * provide a mechanism to the user for utilizing prioritized queues, so
3702 * we only utilize the first data transmit queue (queue1).
3703 */
3704
3705 /**
3706 * Driver allocates buffers of this size for Rx
3707 */
3708
3709 /**
3710 * ipw_rx_queue_space - Return number of free slots available in queue.
3711 */
3712 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3713 {
3714 int s = q->read - q->write;
3715 if (s <= 0)
3716 s += RX_QUEUE_SIZE;
3717 /* keep some buffer to not confuse full and empty queue */
3718 s -= 2;
3719 if (s < 0)
3720 s = 0;
3721 return s;
3722 }
3723
3724 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3725 {
3726 int s = q->last_used - q->first_empty;
3727 if (s <= 0)
3728 s += q->n_bd;
3729 s -= 2; /* keep some reserve to not confuse empty and full situations */
3730 if (s < 0)
3731 s = 0;
3732 return s;
3733 }
3734
3735 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3736 {
3737 return (++index == n_bd) ? 0 : index;
3738 }
3739
3740 /**
3741 * Initialize common DMA queue structure
3742 *
3743 * @param q queue to init
3744 * @param count Number of BD's to allocate. Should be a power of 2
3745 * @param read_register Address for 'read' register
3746 * (not offset within BAR, full address)
3747 * @param write_register Address for 'write' register
3748 * (not offset within BAR, full address)
3749 * @param base_register Address for 'base' register
3750 * (not offset within BAR, full address)
3751 * @param size Address for 'size' register
3752 * (not offset within BAR, full address)
3753 */
3754 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3755 int count, u32 read, u32 write, u32 base, u32 size)
3756 {
3757 q->n_bd = count;
3758
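/* Free-space watermarks used for flow control: ipw_queue_tx_reclaim() wakes
 * the network queue again once more than low_mark entries are free
 * (high_mark is presumably the companion threshold checked on the Tx path). */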
3759 q->low_mark = q->n_bd / 4;
3760 if (q->low_mark < 4)
3761 q->low_mark = 4;
3762
3763 q->high_mark = q->n_bd / 8;
3764 if (q->high_mark < 2)
3765 q->high_mark = 2;
3766
3767 q->first_empty = q->last_used = 0;
3768 q->reg_r = read;
3769 q->reg_w = write;
3770
3771 ipw_write32(priv, base, q->dma_addr);
3772 ipw_write32(priv, size, count);
3773 ipw_write32(priv, read, 0);
3774 ipw_write32(priv, write, 0);
3775
3776 _ipw_read32(priv, 0x90);
3777 }
3778
3779 static int ipw_queue_tx_init(struct ipw_priv *priv,
3780 struct clx2_tx_queue *q,
3781 int count, u32 read, u32 write, u32 base, u32 size)
3782 {
3783 struct pci_dev *dev = priv->pci_dev;
3784
3785 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3786 if (!q->txb) {
3787 IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3788 return -ENOMEM;
3789 }
3790
3791 q->bd =
3792 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3793 if (!q->bd) {
3794 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3795 sizeof(q->bd[0]) * count);
3796 kfree(q->txb);
3797 q->txb = NULL;
3798 return -ENOMEM;
3799 }
3800
3801 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3802 return 0;
3803 }
3804
3805 /**
3806 * Free one TFD, the one at index [txq->q.last_used].
3807 * Do NOT advance any indexes
3808 *
3809 * @param dev
3810 * @param txq
3811 */
3812 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3813 struct clx2_tx_queue *txq)
3814 {
3815 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3816 struct pci_dev *dev = priv->pci_dev;
3817 int i;
3818
3819 /* classify bd */
3820 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3821 /* nothing to cleanup after for host commands */
3822 return;
3823
3824 /* sanity check */
3825 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3826 IPW_ERROR("Too many chunks: %i\n",
3827 le32_to_cpu(bd->u.data.num_chunks));
3828 /** @todo issue fatal error, it is quite serious situation */
3829 return;
3830 }
3831
3832 /* unmap chunks if any */
3833 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3834 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3835 le16_to_cpu(bd->u.data.chunk_len[i]),
3836 PCI_DMA_TODEVICE);
3837 if (txq->txb[txq->q.last_used]) {
3838 libipw_txb_free(txq->txb[txq->q.last_used]);
3839 txq->txb[txq->q.last_used] = NULL;
3840 }
3841 }
3842 }
3843
3844 /**
3845 * Deallocate DMA queue.
3846 *
3847 * Empty queue by removing and destroying all BD's.
3848 * Free all buffers.
3849 *
3850 * @param dev
3851 * @param q
3852 */
3853 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3854 {
3855 struct clx2_queue *q = &txq->q;
3856 struct pci_dev *dev = priv->pci_dev;
3857
3858 if (q->n_bd == 0)
3859 return;
3860
3861 /* first, empty all BD's */
3862 for (; q->first_empty != q->last_used;
3863 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3864 ipw_queue_tx_free_tfd(priv, txq);
3865 }
3866
3867 /* free buffers belonging to queue itself */
3868 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3869 q->dma_addr);
3870 kfree(txq->txb);
3871
3872 /* 0 fill whole structure */
3873 memset(txq, 0, sizeof(*txq));
3874 }
3875
3876 /**
3877 * Destroy all DMA queues and structures
3878 *
3879 * @param priv
3880 */
3881 static void ipw_tx_queue_free(struct ipw_priv *priv)
3882 {
3883 /* Tx CMD queue */
3884 ipw_queue_tx_free(priv, &priv->txq_cmd);
3885
3886 /* Tx queues */
3887 ipw_queue_tx_free(priv, &priv->txq[0]);
3888 ipw_queue_tx_free(priv, &priv->txq[1]);
3889 ipw_queue_tx_free(priv, &priv->txq[2]);
3890 ipw_queue_tx_free(priv, &priv->txq[3]);
3891 }
3892
3893 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3894 {
3895 /* First 3 bytes are manufacturer */
3896 bssid[0] = priv->mac_addr[0];
3897 bssid[1] = priv->mac_addr[1];
3898 bssid[2] = priv->mac_addr[2];
3899
3900 /* Last bytes are random */
3901 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3902
3903 bssid[0] &= 0xfe; /* clear multicast bit */
3904 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3905 }
3906
3907 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3908 {
3909 struct ipw_station_entry entry;
3910 int i;
3911
3912 for (i = 0; i < priv->num_stations; i++) {
3913 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3914 /* Another node is active in network */
3915 priv->missed_adhoc_beacons = 0;
3916 if (!(priv->config & CFG_STATIC_CHANNEL))
3917 /* when other nodes drop out, we drop out */
3918 priv->config &= ~CFG_ADHOC_PERSIST;
3919
3920 return i;
3921 }
3922 }
3923
3924 if (i == MAX_STATIONS)
3925 return IPW_INVALID_STATION;
3926
3927 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3928
3929 entry.reserved = 0;
3930 entry.support_mode = 0;
3931 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3932 memcpy(priv->stations[i], bssid, ETH_ALEN);
3933 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3934 &entry, sizeof(entry));
3935 priv->num_stations++;
3936
3937 return i;
3938 }
3939
3940 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3941 {
3942 int i;
3943
3944 for (i = 0; i < priv->num_stations; i++)
3945 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3946 return i;
3947
3948 return IPW_INVALID_STATION;
3949 }
3950
3951 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3952 {
3953 int err;
3954
3955 if (priv->status & STATUS_ASSOCIATING) {
3956 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3957 schedule_work(&priv->disassociate);
3958 return;
3959 }
3960
3961 if (!(priv->status & STATUS_ASSOCIATED)) {
3962 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3963 return;
3964 }
3965
3966 IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3967 "on channel %d.\n",
3968 priv->assoc_request.bssid,
3969 priv->assoc_request.channel);
3970
3971 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3972 priv->status |= STATUS_DISASSOCIATING;
3973
3974 if (quiet)
3975 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3976 else
3977 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3978
3979 err = ipw_send_associate(priv, &priv->assoc_request);
3980 if (err) {
3981 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3982 "failed.\n");
3983 return;
3984 }
3985
3986 }
3987
3988 static int ipw_disassociate(void *data)
3989 {
3990 struct ipw_priv *priv = data;
3991 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3992 return 0;
3993 ipw_send_disassociate(data, 0);
3994 netif_carrier_off(priv->net_dev);
3995 return 1;
3996 }
3997
3998 static void ipw_bg_disassociate(struct work_struct *work)
3999 {
4000 struct ipw_priv *priv =
4001 container_of(work, struct ipw_priv, disassociate);
4002 mutex_lock(&priv->mutex);
4003 ipw_disassociate(priv);
4004 mutex_unlock(&priv->mutex);
4005 }
4006
4007 static void ipw_system_config(struct work_struct *work)
4008 {
4009 struct ipw_priv *priv =
4010 container_of(work, struct ipw_priv, system_config);
4011
4012 #ifdef CONFIG_IPW2200_PROMISCUOUS
4013 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4014 priv->sys_config.accept_all_data_frames = 1;
4015 priv->sys_config.accept_non_directed_frames = 1;
4016 priv->sys_config.accept_all_mgmt_bcpr = 1;
4017 priv->sys_config.accept_all_mgmt_frames = 1;
4018 }
4019 #endif
4020
4021 ipw_send_system_config(priv);
4022 }
4023
4024 struct ipw_status_code {
4025 u16 status;
4026 const char *reason;
4027 };
4028
4029 static const struct ipw_status_code ipw_status_codes[] = {
4030 {0x00, "Successful"},
4031 {0x01, "Unspecified failure"},
4032 {0x0A, "Cannot support all requested capabilities in the "
4033 "Capability information field"},
4034 {0x0B, "Reassociation denied due to inability to confirm that "
4035 "association exists"},
4036 {0x0C, "Association denied due to reason outside the scope of this "
4037 "standard"},
4038 {0x0D,
4039 "Responding station does not support the specified authentication "
4040 "algorithm"},
4041 {0x0E,
4042 "Received an Authentication frame with authentication sequence "
4043 "transaction sequence number out of expected sequence"},
4044 {0x0F, "Authentication rejected because of challenge failure"},
4045 {0x10, "Authentication rejected due to timeout waiting for next "
4046 "frame in sequence"},
4047 {0x11, "Association denied because AP is unable to handle additional "
4048 "associated stations"},
4049 {0x12,
4050 "Association denied due to requesting station not supporting all "
4051 "of the datarates in the BSSBasicServiceSet Parameter"},
4052 {0x13,
4053 "Association denied due to requesting station not supporting "
4054 "short preamble operation"},
4055 {0x14,
4056 "Association denied due to requesting station not supporting "
4057 "PBCC encoding"},
4058 {0x15,
4059 "Association denied due to requesting station not supporting "
4060 "channel agility"},
4061 {0x19,
4062 "Association denied due to requesting station not supporting "
4063 "short slot operation"},
4064 {0x1A,
4065 "Association denied due to requesting station not supporting "
4066 "DSSS-OFDM operation"},
4067 {0x28, "Invalid Information Element"},
4068 {0x29, "Group Cipher is not valid"},
4069 {0x2A, "Pairwise Cipher is not valid"},
4070 {0x2B, "AKMP is not valid"},
4071 {0x2C, "Unsupported RSN IE version"},
4072 {0x2D, "Invalid RSN IE Capabilities"},
4073 {0x2E, "Cipher suite is rejected per security policy"},
4074 };
4075
4076 static const char *ipw_get_status_code(u16 status)
4077 {
4078 int i;
4079 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4080 if (ipw_status_codes[i].status == (status & 0xff))
4081 return ipw_status_codes[i].reason;
4082 return "Unknown status value.";
4083 }
4084
4085 static void inline average_init(struct average *avg)
4086 {
4087 memset(avg, 0, sizeof(*avg));
4088 }
4089
4090 #define DEPTH_RSSI 8
4091 #define DEPTH_NOISE 16
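/* Simple IIR low-pass filter: each new sample contributes 1/depth of the
 * result and the previous average the remaining (depth-1)/depth, so
 * DEPTH_RSSI and DEPTH_NOISE control how quickly the two averages track
 * new readings. */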
4092 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4093 {
4094 return ((depth-1)*prev_avg + val)/depth;
4095 }
4096
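/* Sliding-window average over the last AVG_ENTRIES samples, kept in a ring
 * buffer; 'init' is set once the ring has wrapped so average_value() knows
 * whether to divide by AVG_ENTRIES or only by the samples seen so far. */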
4097 static void average_add(struct average *avg, s16 val)
4098 {
4099 avg->sum -= avg->entries[avg->pos];
4100 avg->sum += val;
4101 avg->entries[avg->pos++] = val;
4102 if (unlikely(avg->pos == AVG_ENTRIES)) {
4103 avg->init = 1;
4104 avg->pos = 0;
4105 }
4106 }
4107
4108 static s16 average_value(struct average *avg)
4109 {
4110 if (!unlikely(avg->init)) {
4111 if (avg->pos)
4112 return avg->sum / avg->pos;
4113 return 0;
4114 }
4115
4116 return avg->sum / AVG_ENTRIES;
4117 }
4118
4119 static void ipw_reset_stats(struct ipw_priv *priv)
4120 {
4121 u32 len = sizeof(u32);
4122
4123 priv->quality = 0;
4124
4125 average_init(&priv->average_missed_beacons);
4126 priv->exp_avg_rssi = -60;
4127 priv->exp_avg_noise = -85 + 0x100;
4128
4129 priv->last_rate = 0;
4130 priv->last_missed_beacons = 0;
4131 priv->last_rx_packets = 0;
4132 priv->last_tx_packets = 0;
4133 priv->last_tx_failures = 0;
4134
4135 /* Firmware managed, reset only when NIC is restarted, so we have to
4136 * normalize on the current value */
4137 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4138 &priv->last_rx_err, &len);
4139 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4140 &priv->last_tx_failures, &len);
4141
4142 /* Driver managed, reset with each association */
4143 priv->missed_adhoc_beacons = 0;
4144 priv->missed_beacons = 0;
4145 priv->tx_packets = 0;
4146 priv->rx_packets = 0;
4147
4148 }
4149
4150 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4151 {
4152 u32 i = 0x80000000;
4153 u32 mask = priv->rates_mask;
4154 /* If currently associated in B mode, restrict the maximum
4155 * rate match to B rates */
4156 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4157 mask &= LIBIPW_CCK_RATES_MASK;
4158
4159 /* TODO: Verify that the rate is supported by the current rates
4160 * list. */
4161
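/* Scan down from bit 31 for the highest rate bit still set in the mask */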
4162 while (i && !(mask & i))
4163 i >>= 1;
4164 switch (i) {
4165 case LIBIPW_CCK_RATE_1MB_MASK:
4166 return 1000000;
4167 case LIBIPW_CCK_RATE_2MB_MASK:
4168 return 2000000;
4169 case LIBIPW_CCK_RATE_5MB_MASK:
4170 return 5500000;
4171 case LIBIPW_OFDM_RATE_6MB_MASK:
4172 return 6000000;
4173 case LIBIPW_OFDM_RATE_9MB_MASK:
4174 return 9000000;
4175 case LIBIPW_CCK_RATE_11MB_MASK:
4176 return 11000000;
4177 case LIBIPW_OFDM_RATE_12MB_MASK:
4178 return 12000000;
4179 case LIBIPW_OFDM_RATE_18MB_MASK:
4180 return 18000000;
4181 case LIBIPW_OFDM_RATE_24MB_MASK:
4182 return 24000000;
4183 case LIBIPW_OFDM_RATE_36MB_MASK:
4184 return 36000000;
4185 case LIBIPW_OFDM_RATE_48MB_MASK:
4186 return 48000000;
4187 case LIBIPW_OFDM_RATE_54MB_MASK:
4188 return 54000000;
4189 }
4190
4191 if (priv->ieee->mode == IEEE_B)
4192 return 11000000;
4193 else
4194 return 54000000;
4195 }
4196
4197 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4198 {
4199 u32 rate, len = sizeof(rate);
4200 int err;
4201
4202 if (!(priv->status & STATUS_ASSOCIATED))
4203 return 0;
4204
4205 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4206 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4207 &len);
4208 if (err) {
4209 IPW_DEBUG_INFO("failed querying ordinals.\n");
4210 return 0;
4211 }
4212 } else
4213 return ipw_get_max_rate(priv);
4214
4215 switch (rate) {
4216 case IPW_TX_RATE_1MB:
4217 return 1000000;
4218 case IPW_TX_RATE_2MB:
4219 return 2000000;
4220 case IPW_TX_RATE_5MB:
4221 return 5500000;
4222 case IPW_TX_RATE_6MB:
4223 return 6000000;
4224 case IPW_TX_RATE_9MB:
4225 return 9000000;
4226 case IPW_TX_RATE_11MB:
4227 return 11000000;
4228 case IPW_TX_RATE_12MB:
4229 return 12000000;
4230 case IPW_TX_RATE_18MB:
4231 return 18000000;
4232 case IPW_TX_RATE_24MB:
4233 return 24000000;
4234 case IPW_TX_RATE_36MB:
4235 return 36000000;
4236 case IPW_TX_RATE_48MB:
4237 return 48000000;
4238 case IPW_TX_RATE_54MB:
4239 return 54000000;
4240 }
4241
4242 return 0;
4243 }
4244
4245 #define IPW_STATS_INTERVAL (2 * HZ)
4246 static void ipw_gather_stats(struct ipw_priv *priv)
4247 {
4248 u32 rx_err, rx_err_delta, rx_packets_delta;
4249 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4250 u32 missed_beacons_percent, missed_beacons_delta;
4251 u32 quality = 0;
4252 u32 len = sizeof(u32);
4253 s16 rssi;
4254 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4255 rate_quality;
4256 u32 max_rate;
4257
4258 if (!(priv->status & STATUS_ASSOCIATED)) {
4259 priv->quality = 0;
4260 return;
4261 }
4262
4263 /* Update the statistics */
4264 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4265 &priv->missed_beacons, &len);
4266 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4267 priv->last_missed_beacons = priv->missed_beacons;
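/* beacon_interval is in TUs (~1 ms each), so roughly
 * 2000 / beacon_interval beacons are expected per 2 second stats interval;
 * the expression below reduces to
 * missed_beacons_delta * beacon_interval / 20, an approximate percentage
 * of beacons missed during the interval. */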
4268 if (priv->assoc_request.beacon_interval) {
4269 missed_beacons_percent = missed_beacons_delta *
4270 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4271 (IPW_STATS_INTERVAL * 10);
4272 } else {
4273 missed_beacons_percent = 0;
4274 }
4275 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4276
4277 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4278 rx_err_delta = rx_err - priv->last_rx_err;
4279 priv->last_rx_err = rx_err;
4280
4281 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4282 tx_failures_delta = tx_failures - priv->last_tx_failures;
4283 priv->last_tx_failures = tx_failures;
4284
4285 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4286 priv->last_rx_packets = priv->rx_packets;
4287
4288 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4289 priv->last_tx_packets = priv->tx_packets;
4290
4291 /* Calculate quality based on the following:
4292 *
4293 * Missed beacon: 100% = 0, 0% = 70% missed
4294 * Rate: 60% = 1Mbs, 100% = Max
4295 * Rx and Tx errors represent a straight % of total Rx/Tx
4296 * RSSI: 100% = > -50, 0% = < -80
4297 * Rx errors: 100% = 0, 0% = 50% missed
4298 *
4299 * The lowest computed quality is used.
4300 *
4301 */
4302 #define BEACON_THRESHOLD 5
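/* Rescale so that anything at or below BEACON_THRESHOLD percent maps to 0%
 * and the remaining range is stretched back out to 0..100%. */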
4303 beacon_quality = 100 - missed_beacons_percent;
4304 if (beacon_quality < BEACON_THRESHOLD)
4305 beacon_quality = 0;
4306 else
4307 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4308 (100 - BEACON_THRESHOLD);
4309 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4310 beacon_quality, missed_beacons_percent);
4311
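/* Map the current Tx rate onto a 60..100% scale: 60% near zero,
 * 100% when running at the highest enabled rate. */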
4312 priv->last_rate = ipw_get_current_rate(priv);
4313 max_rate = ipw_get_max_rate(priv);
4314 rate_quality = priv->last_rate * 40 / max_rate + 60;
4315 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4316 rate_quality, priv->last_rate / 1000000);
4317
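/* Rx/Tx quality: straight percentage of error-free frames over the last
 * interval, but only once more than 100 packets have been exchanged;
 * otherwise assume 100%. */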
4318 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4319 rx_quality = 100 - (rx_err_delta * 100) /
4320 (rx_packets_delta + rx_err_delta);
4321 else
4322 rx_quality = 100;
4323 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4324 rx_quality, rx_err_delta, rx_packets_delta);
4325
4326 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4327 tx_quality = 100 - (tx_failures_delta * 100) /
4328 (tx_packets_delta + tx_failures_delta);
4329 else
4330 tx_quality = 100;
4331 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4332 tx_quality, tx_failures_delta, tx_packets_delta);
4333
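/* Heuristic RSSI-to-percentage mapping: with
 * x = (perfect_rssi - rssi) / (perfect_rssi - worst_rssi) the expression
 * below works out to 100 - 15x - 62x^2, i.e. quality falls off faster as
 * the signal approaches worst_rssi; the result is then clamped to 0..100. */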
4334 rssi = priv->exp_avg_rssi;
4335 signal_quality =
4336 (100 *
4337 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4338 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4339 (priv->ieee->perfect_rssi - rssi) *
4340 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4341 62 * (priv->ieee->perfect_rssi - rssi))) /
4342 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4343 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4344 if (signal_quality > 100)
4345 signal_quality = 100;
4346 else if (signal_quality < 1)
4347 signal_quality = 0;
4348
4349 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4350 signal_quality, rssi);
4351
4352 quality = min(rx_quality, signal_quality);
4353 quality = min(tx_quality, quality);
4354 quality = min(rate_quality, quality);
4355 quality = min(beacon_quality, quality);
4356 if (quality == beacon_quality)
4357 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4358 quality);
4359 if (quality == rate_quality)
4360 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4361 quality);
4362 if (quality == tx_quality)
4363 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4364 quality);
4365 if (quality == rx_quality)
4366 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4367 quality);
4368 if (quality == signal_quality)
4369 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4370 quality);
4371
4372 priv->quality = quality;
4373
4374 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4375 }
4376
4377 static void ipw_bg_gather_stats(struct work_struct *work)
4378 {
4379 struct ipw_priv *priv =
4380 container_of(work, struct ipw_priv, gather_stats.work);
4381 mutex_lock(&priv->mutex);
4382 ipw_gather_stats(priv);
4383 mutex_unlock(&priv->mutex);
4384 }
4385
4386 /* Missed beacon behavior:
4387 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4388 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4389 * Above disassociate threshold, give up and stop scanning.
4390 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4391 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4392 int missed_count)
4393 {
4394 priv->notif_missed_beacons = missed_count;
4395
4396 if (missed_count > priv->disassociate_threshold &&
4397 priv->status & STATUS_ASSOCIATED) {
4398 /* If associated and we've hit the missed
4399 * beacon threshold, disassociate, turn
4400 * off roaming, and abort any active scans */
4401 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4402 IPW_DL_STATE | IPW_DL_ASSOC,
4403 "Missed beacon: %d - disassociate\n", missed_count);
4404 priv->status &= ~STATUS_ROAMING;
4405 if (priv->status & STATUS_SCANNING) {
4406 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4407 IPW_DL_STATE,
4408 "Aborting scan with missed beacon.\n");
4409 schedule_work(&priv->abort_scan);
4410 }
4411
4412 schedule_work(&priv->disassociate);
4413 return;
4414 }
4415
4416 if (priv->status & STATUS_ROAMING) {
4417 /* If we are currently roaming, then just
4418 * print a debug statement... */
4419 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4420 "Missed beacon: %d - roam in progress\n",
4421 missed_count);
4422 return;
4423 }
4424
4425 if (roaming &&
4426 (missed_count > priv->roaming_threshold &&
4427 missed_count <= priv->disassociate_threshold)) {
4428 /* If we are not already roaming, set the ROAM
4429 * bit in the status and kick off a scan.
4430 * This can happen several times before we reach
4431 * disassociate_threshold. */
4432 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4433 "Missed beacon: %d - initiate "
4434 "roaming\n", missed_count);
4435 if (!(priv->status & STATUS_ROAMING)) {
4436 priv->status |= STATUS_ROAMING;
4437 if (!(priv->status & STATUS_SCANNING))
4438 schedule_delayed_work(&priv->request_scan, 0);
4439 }
4440 return;
4441 }
4442
4443 if (priv->status & STATUS_SCANNING &&
4444 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4445 /* Stop scan to keep fw from getting
4446 * stuck (only if we aren't roaming --
4447 * otherwise we'll never scan more than 2 or 3
4448 * channels..) */
4449 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4450 "Aborting scan with missed beacon.\n");
4451 schedule_work(&priv->abort_scan);
4452 }
4453
4454 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4455 }
4456
4457 static void ipw_scan_event(struct work_struct *work)
4458 {
4459 union iwreq_data wrqu;
4460
4461 struct ipw_priv *priv =
4462 container_of(work, struct ipw_priv, scan_event.work);
4463
4464 wrqu.data.length = 0;
4465 wrqu.data.flags = 0;
4466 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4467 }
4468
4469 static void handle_scan_event(struct ipw_priv *priv)
4470 {
4471 /* Only userspace-requested scan completion events go out immediately */
4472 if (!priv->user_requested_scan) {
4473 if (!delayed_work_pending(&priv->scan_event))
4474 schedule_delayed_work(&priv->scan_event,
4475 round_jiffies_relative(msecs_to_jiffies(4000)));
4476 } else {
4477 union iwreq_data wrqu;
4478
4479 priv->user_requested_scan = 0;
4480 cancel_delayed_work(&priv->scan_event);
4481
4482 wrqu.data.length = 0;
4483 wrqu.data.flags = 0;
4484 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4485 }
4486 }
4487
4488 /**
4489 * Handle host notification packet.
4490 * Called from interrupt routine
4491 */
4492 static void ipw_rx_notification(struct ipw_priv *priv,
4493 struct ipw_rx_notification *notif)
4494 {
4495 DECLARE_SSID_BUF(ssid);
4496 u16 size = le16_to_cpu(notif->size);
4497
4498 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4499
4500 switch (notif->subtype) {
4501 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4502 struct notif_association *assoc = &notif->u.assoc;
4503
4504 switch (assoc->state) {
4505 case CMAS_ASSOCIATED:{
4506 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4507 IPW_DL_ASSOC,
4508 "associated: '%s' %pM\n",
4509 print_ssid(ssid, priv->essid,
4510 priv->essid_len),
4511 priv->bssid);
4512
4513 switch (priv->ieee->iw_mode) {
4514 case IW_MODE_INFRA:
4515 memcpy(priv->ieee->bssid,
4516 priv->bssid, ETH_ALEN);
4517 break;
4518
4519 case IW_MODE_ADHOC:
4520 memcpy(priv->ieee->bssid,
4521 priv->bssid, ETH_ALEN);
4522
4523 /* clear out the station table */
4524 priv->num_stations = 0;
4525
4526 IPW_DEBUG_ASSOC
4527 ("queueing adhoc check\n");
4528 schedule_delayed_work(
4529 &priv->adhoc_check,
4530 le16_to_cpu(priv->
4531 assoc_request.
4532 beacon_interval));
4533 break;
4534 }
4535
4536 priv->status &= ~STATUS_ASSOCIATING;
4537 priv->status |= STATUS_ASSOCIATED;
4538 schedule_work(&priv->system_config);
4539
4540 #ifdef CONFIG_IPW2200_QOS
4541 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4542 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4543 if ((priv->status & STATUS_AUTH) &&
4544 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4545 == IEEE80211_STYPE_ASSOC_RESP)) {
4546 if ((sizeof
4547 (struct
4548 libipw_assoc_response)
4549 <= size)
4550 && (size <= 2314)) {
4551 struct
4552 libipw_rx_stats
4553 stats = {
4554 .len = size - 1,
4555 };
4556
4557 IPW_DEBUG_QOS
4558 ("QoS Associate "
4559 "size %d\n", size);
4560 libipw_rx_mgt(priv->
4561 ieee,
4562 (struct
4563 libipw_hdr_4addr
4564 *)
4565 &notif->u.raw, &stats);
4566 }
4567 }
4568 #endif
4569
4570 schedule_work(&priv->link_up);
4571
4572 break;
4573 }
4574
4575 case CMAS_AUTHENTICATED:{
4576 if (priv->
4577 status & (STATUS_ASSOCIATED |
4578 STATUS_AUTH)) {
4579 struct notif_authenticate *auth
4580 = &notif->u.auth;
4581 IPW_DEBUG(IPW_DL_NOTIF |
4582 IPW_DL_STATE |
4583 IPW_DL_ASSOC,
4584 "deauthenticated: '%s' "
4585 "%pM"
4586 ": (0x%04X) - %s\n",
4587 print_ssid(ssid,
4588 priv->
4589 essid,
4590 priv->
4591 essid_len),
4592 priv->bssid,
4593 le16_to_cpu(auth->status),
4594 ipw_get_status_code
4595 (le16_to_cpu
4596 (auth->status)));
4597
4598 priv->status &=
4599 ~(STATUS_ASSOCIATING |
4600 STATUS_AUTH |
4601 STATUS_ASSOCIATED);
4602
4603 schedule_work(&priv->link_down);
4604 break;
4605 }
4606
4607 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4608 IPW_DL_ASSOC,
4609 "authenticated: '%s' %pM\n",
4610 print_ssid(ssid, priv->essid,
4611 priv->essid_len),
4612 priv->bssid);
4613 break;
4614 }
4615
4616 case CMAS_INIT:{
4617 if (priv->status & STATUS_AUTH) {
4618 struct
4619 libipw_assoc_response
4620 *resp;
4621 resp =
4622 (struct
4623 libipw_assoc_response
4624 *)&notif->u.raw;
4625 IPW_DEBUG(IPW_DL_NOTIF |
4626 IPW_DL_STATE |
4627 IPW_DL_ASSOC,
4628 "association failed (0x%04X): %s\n",
4629 le16_to_cpu(resp->status),
4630 ipw_get_status_code
4631 (le16_to_cpu
4632 (resp->status)));
4633 }
4634
4635 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4636 IPW_DL_ASSOC,
4637 "disassociated: '%s' %pM\n",
4638 print_ssid(ssid, priv->essid,
4639 priv->essid_len),
4640 priv->bssid);
4641
4642 priv->status &=
4643 ~(STATUS_DISASSOCIATING |
4644 STATUS_ASSOCIATING |
4645 STATUS_ASSOCIATED | STATUS_AUTH);
4646 if (priv->assoc_network
4647 && (priv->assoc_network->
4648 capability &
4649 WLAN_CAPABILITY_IBSS))
4650 ipw_remove_current_network
4651 (priv);
4652
4653 schedule_work(&priv->link_down);
4654
4655 break;
4656 }
4657
4658 case CMAS_RX_ASSOC_RESP:
4659 break;
4660
4661 default:
4662 IPW_ERROR("assoc: unknown (%d)\n",
4663 assoc->state);
4664 break;
4665 }
4666
4667 break;
4668 }
4669
4670 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4671 struct notif_authenticate *auth = &notif->u.auth;
4672 switch (auth->state) {
4673 case CMAS_AUTHENTICATED:
4674 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4675 "authenticated: '%s' %pM\n",
4676 print_ssid(ssid, priv->essid,
4677 priv->essid_len),
4678 priv->bssid);
4679 priv->status |= STATUS_AUTH;
4680 break;
4681
4682 case CMAS_INIT:
4683 if (priv->status & STATUS_AUTH) {
4684 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4685 IPW_DL_ASSOC,
4686 "authentication failed (0x%04X): %s\n",
4687 le16_to_cpu(auth->status),
4688 ipw_get_status_code(le16_to_cpu
4689 (auth->
4690 status)));
4691 }
4692 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4693 IPW_DL_ASSOC,
4694 "deauthenticated: '%s' %pM\n",
4695 print_ssid(ssid, priv->essid,
4696 priv->essid_len),
4697 priv->bssid);
4698
4699 priv->status &= ~(STATUS_ASSOCIATING |
4700 STATUS_AUTH |
4701 STATUS_ASSOCIATED);
4702
4703 schedule_work(&priv->link_down);
4704 break;
4705
4706 case CMAS_TX_AUTH_SEQ_1:
4707 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4708 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4709 break;
4710 case CMAS_RX_AUTH_SEQ_2:
4711 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4713 break;
4714 case CMAS_AUTH_SEQ_1_PASS:
4715 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4717 break;
4718 case CMAS_AUTH_SEQ_1_FAIL:
4719 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4720 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4721 break;
4722 case CMAS_TX_AUTH_SEQ_3:
4723 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4724 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4725 break;
4726 case CMAS_RX_AUTH_SEQ_4:
4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4729 break;
4730 case CMAS_AUTH_SEQ_2_PASS:
4731 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4732 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4733 break;
4734 case CMAS_AUTH_SEQ_2_FAIL:
4735 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4736 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4737 break;
4738 case CMAS_TX_ASSOC:
4739 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4740 IPW_DL_ASSOC, "TX_ASSOC\n");
4741 break;
4742 case CMAS_RX_ASSOC_RESP:
4743 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4744 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4745
4746 break;
4747 case CMAS_ASSOCIATED:
4748 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4749 IPW_DL_ASSOC, "ASSOCIATED\n");
4750 break;
4751 default:
4752 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4753 auth->state);
4754 break;
4755 }
4756 break;
4757 }
4758
4759 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4760 struct notif_channel_result *x =
4761 &notif->u.channel_result;
4762
4763 if (size == sizeof(*x)) {
4764 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4765 x->channel_num);
4766 } else {
4767 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4768 "(should be %zd)\n",
4769 size, sizeof(*x));
4770 }
4771 break;
4772 }
4773
4774 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4775 struct notif_scan_complete *x = &notif->u.scan_complete;
4776 if (size == sizeof(*x)) {
4777 IPW_DEBUG_SCAN
4778 ("Scan completed: type %d, %d channels, "
4779 "%d status\n", x->scan_type,
4780 x->num_channels, x->status);
4781 } else {
4782 IPW_ERROR("Scan completed of wrong size %d "
4783 "(should be %zd)\n",
4784 size, sizeof(*x));
4785 }
4786
4787 priv->status &=
4788 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4789
4790 wake_up_interruptible(&priv->wait_state);
4791 cancel_delayed_work(&priv->scan_check);
4792
4793 if (priv->status & STATUS_EXIT_PENDING)
4794 break;
4795
4796 priv->ieee->scans++;
4797
4798 #ifdef CONFIG_IPW2200_MONITOR
4799 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4800 priv->status |= STATUS_SCAN_FORCED;
4801 schedule_delayed_work(&priv->request_scan, 0);
4802 break;
4803 }
4804 priv->status &= ~STATUS_SCAN_FORCED;
4805 #endif /* CONFIG_IPW2200_MONITOR */
4806
4807 /* Do queued direct scans first */
4808 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4809 schedule_delayed_work(&priv->request_direct_scan, 0);
4810
4811 if (!(priv->status & (STATUS_ASSOCIATED |
4812 STATUS_ASSOCIATING |
4813 STATUS_ROAMING |
4814 STATUS_DISASSOCIATING)))
4815 schedule_work(&priv->associate);
4816 else if (priv->status & STATUS_ROAMING) {
4817 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4818 /* If a scan completed and we are in roam mode, then
4819 * the scan that completed was the one requested as a
4820 * result of entering roam... so, schedule the
4821 * roam work */
4822 schedule_work(&priv->roam);
4823 else
4824 /* Don't schedule if we aborted the scan */
4825 priv->status &= ~STATUS_ROAMING;
4826 } else if (priv->status & STATUS_SCAN_PENDING)
4827 schedule_delayed_work(&priv->request_scan, 0);
4828 else if (priv->config & CFG_BACKGROUND_SCAN
4829 && priv->status & STATUS_ASSOCIATED)
4830 schedule_delayed_work(&priv->request_scan,
4831 round_jiffies_relative(HZ));
4832
4833 /* Send an empty event to user space.
4834 * We don't send the received data on the event because
4835 * it would require us to do complex transcoding, and
4836 * we want to minimise the work done in the irq handler
4837 * Use a request to extract the data.
4838 * Also, we generate this event for any scan, regardless
4839 * of how the scan was initiated. User space can just
4840 * sync on periodic scan to get fresh data...
4841 * Jean II */
4842 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4843 handle_scan_event(priv);
4844 break;
4845 }
4846
4847 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4848 struct notif_frag_length *x = &notif->u.frag_len;
4849
4850 if (size == sizeof(*x))
4851 IPW_ERROR("Frag length: %d\n",
4852 le16_to_cpu(x->frag_length));
4853 else
4854 IPW_ERROR("Frag length of wrong size %d "
4855 "(should be %zd)\n",
4856 size, sizeof(*x));
4857 break;
4858 }
4859
4860 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4861 struct notif_link_deterioration *x =
4862 &notif->u.link_deterioration;
4863
4864 if (size == sizeof(*x)) {
4865 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4866 "link deterioration: type %d, cnt %d\n",
4867 x->silence_notification_type,
4868 x->silence_count);
4869 memcpy(&priv->last_link_deterioration, x,
4870 sizeof(*x));
4871 } else {
4872 IPW_ERROR("Link Deterioration of wrong size %d "
4873 "(should be %zd)\n",
4874 size, sizeof(*x));
4875 }
4876 break;
4877 }
4878
4879 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4880 IPW_ERROR("Dino config\n");
4881 if (priv->hcmd
4882 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4883 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4884
4885 break;
4886 }
4887
4888 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4889 struct notif_beacon_state *x = &notif->u.beacon_state;
4890 if (size != sizeof(*x)) {
4891 IPW_ERROR
4892 ("Beacon state of wrong size %d (should "
4893 "be %zd)\n", size, sizeof(*x));
4894 break;
4895 }
4896
4897 if (le32_to_cpu(x->state) ==
4898 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4899 ipw_handle_missed_beacon(priv,
4900 le32_to_cpu(x->
4901 number));
4902
4903 break;
4904 }
4905
4906 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4907 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4908 if (size == sizeof(*x)) {
4909 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4910 "0x%02x station %d\n",
4911 x->key_state, x->security_type,
4912 x->station_index);
4913 break;
4914 }
4915
4916 IPW_ERROR
4917 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4918 size, sizeof(*x));
4919 break;
4920 }
4921
4922 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4923 struct notif_calibration *x = &notif->u.calibration;
4924
4925 if (size == sizeof(*x)) {
4926 memcpy(&priv->calib, x, sizeof(*x));
4927 IPW_DEBUG_INFO("TODO: Calibration\n");
4928 break;
4929 }
4930
4931 IPW_ERROR
4932 ("Calibration of wrong size %d (should be %zd)\n",
4933 size, sizeof(*x));
4934 break;
4935 }
4936
4937 case HOST_NOTIFICATION_NOISE_STATS:{
4938 if (size == sizeof(u32)) {
4939 priv->exp_avg_noise =
4940 exponential_average(priv->exp_avg_noise,
4941 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4942 DEPTH_NOISE);
4943 break;
4944 }
4945
4946 IPW_ERROR
4947 ("Noise stat is wrong size %d (should be %zd)\n",
4948 size, sizeof(u32));
4949 break;
4950 }
4951
4952 default:
4953 IPW_DEBUG_NOTIF("Unknown notification: "
4954 "subtype=%d,flags=0x%2x,size=%d\n",
4955 notif->subtype, notif->flags, size);
4956 }
4957 }
4958
4959 /**
4960 * Destroys all DMA structures and initialises them again
4961 *
4962 * @param priv
4963 * @return error code
4964 */
4965 static int ipw_queue_reset(struct ipw_priv *priv)
4966 {
4967 int rc = 0;
4968 /** @todo customize queue sizes */
4969 int nTx = 64, nTxCmd = 8;
4970 ipw_tx_queue_free(priv);
4971 /* Tx CMD queue */
4972 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4973 IPW_TX_CMD_QUEUE_READ_INDEX,
4974 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4975 IPW_TX_CMD_QUEUE_BD_BASE,
4976 IPW_TX_CMD_QUEUE_BD_SIZE);
4977 if (rc) {
4978 IPW_ERROR("Tx Cmd queue init failed\n");
4979 goto error;
4980 }
4981 /* Tx queue(s) */
4982 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4983 IPW_TX_QUEUE_0_READ_INDEX,
4984 IPW_TX_QUEUE_0_WRITE_INDEX,
4985 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4986 if (rc) {
4987 IPW_ERROR("Tx 0 queue init failed\n");
4988 goto error;
4989 }
4990 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4991 IPW_TX_QUEUE_1_READ_INDEX,
4992 IPW_TX_QUEUE_1_WRITE_INDEX,
4993 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4994 if (rc) {
4995 IPW_ERROR("Tx 1 queue init failed\n");
4996 goto error;
4997 }
4998 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4999 IPW_TX_QUEUE_2_READ_INDEX,
5000 IPW_TX_QUEUE_2_WRITE_INDEX,
5001 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
5002 if (rc) {
5003 IPW_ERROR("Tx 2 queue init failed\n");
5004 goto error;
5005 }
5006 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5007 IPW_TX_QUEUE_3_READ_INDEX,
5008 IPW_TX_QUEUE_3_WRITE_INDEX,
5009 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5010 if (rc) {
5011 IPW_ERROR("Tx 3 queue init failed\n");
5012 goto error;
5013 }
5014 /* statistics */
5015 priv->rx_bufs_min = 0;
5016 priv->rx_pend_max = 0;
5017 return rc;
5018
5019 error:
5020 ipw_tx_queue_free(priv);
5021 return rc;
5022 }
5023
5024 /**
5025 * Reclaim Tx queue entries no more used by NIC.
5026 *
5027 * When FW advances 'R' index, all entries between old and
5028 * new 'R' index need to be reclaimed. As a result, some free space
5029 * forms. If there is enough free space (> low mark), wake Tx queue.
5030 *
5031 * @note Need to protect against garbage in 'R' index
5032 * @param priv
5033 * @param txq
5034 * @param qindex
5035 * @return Number of used entries remaining in the queue
5036 */
5037 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5038 struct clx2_tx_queue *txq, int qindex)
5039 {
5040 u32 hw_tail;
5041 int used;
5042 struct clx2_queue *q = &txq->q;
5043
5044 hw_tail = ipw_read32(priv, q->reg_r);
5045 if (hw_tail >= q->n_bd) {
5046 IPW_ERROR
5047 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5048 hw_tail, q->n_bd);
5049 goto done;
5050 }
5051 for (; q->last_used != hw_tail;
5052 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5053 ipw_queue_tx_free_tfd(priv, txq);
5054 priv->tx_packets++;
5055 }
5056 done:
5057 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5058 (qindex >= 0))
5059 netif_wake_queue(priv->net_dev);
5060 used = q->first_empty - q->last_used;
5061 if (used < 0)
5062 used += q->n_bd;
5063
5064 return used;
5065 }
5066
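/* Place a host command on the command TFD ring and hand it to the firmware
 * by advancing the write index; the trailing read of register 0x90 mirrors
 * ipw_queue_init() and presumably just flushes the posted write. */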
5067 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5068 int len, int sync)
5069 {
5070 struct clx2_tx_queue *txq = &priv->txq_cmd;
5071 struct clx2_queue *q = &txq->q;
5072 struct tfd_frame *tfd;
5073
5074 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5075 IPW_ERROR("No space for Tx\n");
5076 return -EBUSY;
5077 }
5078
5079 tfd = &txq->bd[q->first_empty];
5080 txq->txb[q->first_empty] = NULL;
5081
5082 memset(tfd, 0, sizeof(*tfd));
5083 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5084 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5085 priv->hcmd_seq++;
5086 tfd->u.cmd.index = hcmd;
5087 tfd->u.cmd.length = len;
5088 memcpy(tfd->u.cmd.payload, buf, len);
5089 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5090 ipw_write32(priv, q->reg_w, q->first_empty);
5091 _ipw_read32(priv, 0x90);
5092
5093 return 0;
5094 }
5095
5096 /*
5097 * Rx theory of operation
5098 *
5099 * The host allocates 32 DMA target addresses and passes the host address
5100 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5101 * 0 to 31
5102 *
5103 * Rx Queue Indexes
5104 * The host/firmware share two index registers for managing the Rx buffers.
5105 *
5106 * The READ index maps to the first position that the firmware may be writing
5107 * to -- the driver can read up to (but not including) this position and get
5108 * good data.
5109 * The READ index is managed by the firmware once the card is enabled.
5110 *
5111 * The WRITE index maps to the last position the driver has read from -- the
5112 * position preceding WRITE is the last slot in which the firmware can place a packet.
5113 *
5114 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5115 * WRITE = READ.
5116 *
5117 * During initialization the host sets up the READ queue position to the first
5118 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5119 *
5120 * When the firmware places a packet in a buffer it will advance the READ index
5121 * and fire the RX interrupt. The driver can then query the READ index and
5122 * process as many packets as possible, moving the WRITE index forward as it
5123 * resets the Rx queue buffers with new memory.
5124 *
5125 * The management in the driver is as follows:
5126 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5127 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5128 * to replenish the ipw->rxq->rx_free.
5129 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5130 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5131 * 'processed' and 'read' driver indexes as well)
5132 * + A received packet is processed and handed to the kernel network stack,
5133 * detached from the ipw->rxq. The driver 'processed' index is updated.
5134 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5135 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5136 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5137 * were enough free buffers and RX_STALLED is set it is cleared.
5138 *
5139 *
5140 * Driver sequence:
5141 *
5142 * ipw_rx_queue_alloc() Allocates rx_free
5143 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5144 * ipw_rx_queue_restock
5145 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5146 * queue, updates firmware pointers, and updates
5147 * the WRITE index. If insufficient rx_free buffers
5148 * are available, schedules ipw_rx_queue_replenish
5149 *
5150 * -- enable interrupts --
5151 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5152 * READ INDEX, detaching the SKB from the pool.
5153 * Moves the packet buffer from queue to rx_used.
5154 * Calls ipw_rx_queue_restock to refill any empty
5155 * slots.
5156 * ...
5157 *
5158 */
5159
5160 /*
5161 * If there are slots in the RX queue that need to be restocked,
5162 * and we have free pre-allocated buffers, fill the ranks as much
5163 * as we can pulling from rx_free.
5164 *
5165 * This moves the 'write' index forward to catch up with 'processed', and
5166 * also updates the memory address in the firmware to reference the new
5167 * target buffer.
5168 */
5169 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5170 {
5171 struct ipw_rx_queue *rxq = priv->rxq;
5172 struct list_head *element;
5173 struct ipw_rx_mem_buffer *rxb;
5174 unsigned long flags;
5175 int write;
5176
5177 spin_lock_irqsave(&rxq->lock, flags);
5178 write = rxq->write;
5179 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5180 element = rxq->rx_free.next;
5181 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5182 list_del(element);
5183
5184 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5185 rxb->dma_addr);
5186 rxq->queue[rxq->write] = rxb;
5187 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5188 rxq->free_count--;
5189 }
5190 spin_unlock_irqrestore(&rxq->lock, flags);
5191
5192 /* If the pre-allocated buffer pool is dropping low, schedule to
5193 * refill it */
5194 if (rxq->free_count <= RX_LOW_WATERMARK)
5195 schedule_work(&priv->rx_replenish);
5196
5197 /* If we've added more space for the firmware to place data, tell it */
5198 if (write != rxq->write)
5199 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5200 }
5201
5202 /*
5203 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5204 * Also restock the Rx queue via ipw_rx_queue_restock.
5205 *
5206 * This is called as a scheduled work item (except during initialization)
5207 */
5208 static void ipw_rx_queue_replenish(void *data)
5209 {
5210 struct ipw_priv *priv = data;
5211 struct ipw_rx_queue *rxq = priv->rxq;
5212 struct list_head *element;
5213 struct ipw_rx_mem_buffer *rxb;
5214 unsigned long flags;
5215
5216 spin_lock_irqsave(&rxq->lock, flags);
5217 while (!list_empty(&rxq->rx_used)) {
5218 element = rxq->rx_used.next;
5219 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5220 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5221 if (!rxb->skb) {
5222 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5223 priv->net_dev->name);
5224 /* We don't reschedule replenish work here -- we will
5225 * call the restock method and if it still needs
5226 * more buffers it will schedule replenish */
5227 break;
5228 }
5229 list_del(element);
5230
5231 rxb->dma_addr =
5232 pci_map_single(priv->pci_dev, rxb->skb->data,
5233 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5234
5235 list_add_tail(&rxb->list, &rxq->rx_free);
5236 rxq->free_count++;
5237 }
5238 spin_unlock_irqrestore(&rxq->lock, flags);
5239
5240 ipw_rx_queue_restock(priv);
5241 }
5242
5243 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5244 {
5245 struct ipw_priv *priv =
5246 container_of(work, struct ipw_priv, rx_replenish);
5247 mutex_lock(&priv->mutex);
5248 ipw_rx_queue_replenish(priv);
5249 mutex_unlock(&priv->mutex);
5250 }
5251
5252 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5253 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5254 * This free routine walks the list of POOL entries and, if the SKB is
5255 * non-NULL, unmaps and frees it.
5256 */
5257 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5258 {
5259 int i;
5260
5261 if (!rxq)
5262 return;
5263
5264 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5265 if (rxq->pool[i].skb != NULL) {
5266 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5267 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5268 dev_kfree_skb(rxq->pool[i].skb);
5269 }
5270 }
5271
5272 kfree(rxq);
5273 }
5274
5275 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5276 {
5277 struct ipw_rx_queue *rxq;
5278 int i;
5279
5280 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5281 if (unlikely(!rxq)) {
5282 IPW_ERROR("memory allocation failed\n");
5283 return NULL;
5284 }
5285 spin_lock_init(&rxq->lock);
5286 INIT_LIST_HEAD(&rxq->rx_free);
5287 INIT_LIST_HEAD(&rxq->rx_used);
5288
5289 /* Fill the rx_used queue with _all_ of the Rx buffers */
5290 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5291 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5292
5293 /* Set things up so that we have processed and used all buffers, but have
5294 * not restocked the Rx queue with fresh buffers */
5295 rxq->read = rxq->write = 0;
5296 rxq->free_count = 0;
5297
5298 return rxq;
5299 }
5300
5301 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5302 {
5303 rate &= ~LIBIPW_BASIC_RATE_MASK;
5304 if (ieee_mode == IEEE_A) {
5305 switch (rate) {
5306 case LIBIPW_OFDM_RATE_6MB:
5307 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5308 1 : 0;
5309 case LIBIPW_OFDM_RATE_9MB:
5310 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5311 1 : 0;
5312 case LIBIPW_OFDM_RATE_12MB:
5313 return priv->
5314 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5315 case LIBIPW_OFDM_RATE_18MB:
5316 return priv->
5317 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5318 case LIBIPW_OFDM_RATE_24MB:
5319 return priv->
5320 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5321 case LIBIPW_OFDM_RATE_36MB:
5322 return priv->
5323 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5324 case LIBIPW_OFDM_RATE_48MB:
5325 return priv->
5326 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5327 case LIBIPW_OFDM_RATE_54MB:
5328 return priv->
5329 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5330 default:
5331 return 0;
5332 }
5333 }
5334
5335 /* B and G mixed */
5336 switch (rate) {
5337 case LIBIPW_CCK_RATE_1MB:
5338 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5339 case LIBIPW_CCK_RATE_2MB:
5340 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5341 case LIBIPW_CCK_RATE_5MB:
5342 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5343 case LIBIPW_CCK_RATE_11MB:
5344 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5345 }
5346
5347 /* If we are limited to B modulations, bail at this point */
5348 if (ieee_mode == IEEE_B)
5349 return 0;
5350
5351 /* G */
5352 switch (rate) {
5353 case LIBIPW_OFDM_RATE_6MB:
5354 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5355 case LIBIPW_OFDM_RATE_9MB:
5356 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5357 case LIBIPW_OFDM_RATE_12MB:
5358 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5359 case LIBIPW_OFDM_RATE_18MB:
5360 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5361 case LIBIPW_OFDM_RATE_24MB:
5362 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5363 case LIBIPW_OFDM_RATE_36MB:
5364 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5365 case LIBIPW_OFDM_RATE_48MB:
5366 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5367 case LIBIPW_OFDM_RATE_54MB:
5368 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5369 }
5370
5371 return 0;
5372 }
5373
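/*
 * Build the intersection of the rates advertised by 'network' and the
 * user-configured rates_mask.  Rates flagged as basic by the AP are
 * kept even when the mask would exclude them, since a mandatory rate
 * cannot simply be dropped.  Both the Supported Rates and Extended
 * Supported Rates lists are walked, capped at IPW_MAX_RATES entries
 * in total.
 */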
5374 static int ipw_compatible_rates(struct ipw_priv *priv,
5375 const struct libipw_network *network,
5376 struct ipw_supported_rates *rates)
5377 {
5378 int num_rates, i;
5379
5380 memset(rates, 0, sizeof(*rates));
5381 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5382 rates->num_rates = 0;
5383 for (i = 0; i < num_rates; i++) {
5384 if (!ipw_is_rate_in_mask(priv, network->mode,
5385 network->rates[i])) {
5386
5387 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5388 IPW_DEBUG_SCAN("Adding masked mandatory "
5389 "rate %02X\n",
5390 network->rates[i]);
5391 rates->supported_rates[rates->num_rates++] =
5392 network->rates[i];
5393 continue;
5394 }
5395
5396 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5397 network->rates[i], priv->rates_mask);
5398 continue;
5399 }
5400
5401 rates->supported_rates[rates->num_rates++] = network->rates[i];
5402 }
5403
5404 num_rates = min(network->rates_ex_len,
5405 (u8) (IPW_MAX_RATES - num_rates));
5406 for (i = 0; i < num_rates; i++) {
5407 if (!ipw_is_rate_in_mask(priv, network->mode,
5408 network->rates_ex[i])) {
5409 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5410 IPW_DEBUG_SCAN("Adding masked mandatory "
5411 "rate %02X\n",
5412 network->rates_ex[i]);
5413 rates->supported_rates[rates->num_rates++] =
5414 network->rates_ex[i];
5415 continue;
5416 }
5417
5418 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5419 network->rates_ex[i], priv->rates_mask);
5420 continue;
5421 }
5422
5423 rates->supported_rates[rates->num_rates++] =
5424 network->rates_ex[i];
5425 }
5426
5427 return 1;
5428 }
5429
5430 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5431 const struct ipw_supported_rates *src)
5432 {
5433 u8 i;
5434 for (i = 0; i < src->num_rates; i++)
5435 dest->supported_rates[i] = src->supported_rates[i];
5436 dest->num_rates = src->num_rates;
5437 }
5438
5439 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5440 * mask should ever be used -- right now all callers to add the scan rates are
5441 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5442 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5443 u8 modulation, u32 rate_mask)
5444 {
5445 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5446 LIBIPW_BASIC_RATE_MASK : 0;
5447
5448 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5449 rates->supported_rates[rates->num_rates++] =
5450 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5451
5452 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5453 rates->supported_rates[rates->num_rates++] =
5454 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5455
5456 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5457 rates->supported_rates[rates->num_rates++] = basic_mask |
5458 LIBIPW_CCK_RATE_5MB;
5459
5460 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5461 rates->supported_rates[rates->num_rates++] = basic_mask |
5462 LIBIPW_CCK_RATE_11MB;
5463 }
5464
5465 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5466 u8 modulation, u32 rate_mask)
5467 {
5468 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5469 LIBIPW_BASIC_RATE_MASK : 0;
5470
5471 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5472 rates->supported_rates[rates->num_rates++] = basic_mask |
5473 LIBIPW_OFDM_RATE_6MB;
5474
5475 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5476 rates->supported_rates[rates->num_rates++] =
5477 LIBIPW_OFDM_RATE_9MB;
5478
5479 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5480 rates->supported_rates[rates->num_rates++] = basic_mask |
5481 LIBIPW_OFDM_RATE_12MB;
5482
5483 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5484 rates->supported_rates[rates->num_rates++] =
5485 LIBIPW_OFDM_RATE_18MB;
5486
5487 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5488 rates->supported_rates[rates->num_rates++] = basic_mask |
5489 LIBIPW_OFDM_RATE_24MB;
5490
5491 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5492 rates->supported_rates[rates->num_rates++] =
5493 LIBIPW_OFDM_RATE_36MB;
5494
5495 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5496 rates->supported_rates[rates->num_rates++] =
5497 LIBIPW_OFDM_RATE_48MB;
5498
5499 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5500 rates->supported_rates[rates->num_rates++] =
5501 LIBIPW_OFDM_RATE_54MB;
5502 }
5503
5504 struct ipw_network_match {
5505 struct libipw_network *network;
5506 struct ipw_supported_rates rates;
5507 };
5508
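/*
 * Check whether 'network' is a candidate for an ad-hoc merge.  The
 * network is run through a series of filters (capability, ESSID,
 * beacon timestamp, scan age, channel, privacy, BSSID, mode and rate
 * compatibility); if it survives them all, it replaces the current
 * entry in 'match'.  Returns 1 on a viable match, 0 otherwise.
 */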
5509 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5510 struct ipw_network_match *match,
5511 struct libipw_network *network,
5512 int roaming)
5513 {
5514 struct ipw_supported_rates rates;
5515 DECLARE_SSID_BUF(ssid);
5516
5517 /* Verify that this network's capability is compatible with the
5518 * current mode (AdHoc or Infrastructure) */
5519 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5520 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5521 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5522 "capability mismatch.\n",
5523 print_ssid(ssid, network->ssid,
5524 network->ssid_len),
5525 network->bssid);
5526 return 0;
5527 }
5528
5529 if (unlikely(roaming)) {
5530 /* If we are roaming, then check if this is a valid
5531 * network to try to roam to */
5532 if ((network->ssid_len != match->network->ssid_len) ||
5533 memcmp(network->ssid, match->network->ssid,
5534 network->ssid_len)) {
5535 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5536 "because of non-network ESSID.\n",
5537 print_ssid(ssid, network->ssid,
5538 network->ssid_len),
5539 network->bssid);
5540 return 0;
5541 }
5542 } else {
5543 /* If an ESSID has been configured then compare the broadcast
5544 * ESSID to ours */
5545 if ((priv->config & CFG_STATIC_ESSID) &&
5546 ((network->ssid_len != priv->essid_len) ||
5547 memcmp(network->ssid, priv->essid,
5548 min(network->ssid_len, priv->essid_len)))) {
5549 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5550
5551 strncpy(escaped,
5552 print_ssid(ssid, network->ssid,
5553 network->ssid_len),
5554 sizeof(escaped));
5555 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5556 "because of ESSID mismatch: '%s'.\n",
5557 escaped, network->bssid,
5558 print_ssid(ssid, priv->essid,
5559 priv->essid_len));
5560 return 0;
5561 }
5562 }
5563
5564 /* If the current network's beacon timestamp is newer than this
5565 * one's, don't bother testing everything else. */
5566
5567 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5568 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5569 "current network.\n",
5570 print_ssid(ssid, match->network->ssid,
5571 match->network->ssid_len));
5572 return 0;
5573 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5574 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5575 "current network.\n",
5576 print_ssid(ssid, match->network->ssid,
5577 match->network->ssid_len));
5578 return 0;
5579 }
5580
5581 /* Now go through and see if the requested network is valid... */
5582 if (priv->ieee->scan_age != 0 &&
5583 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5584 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5585 "because of age: %ums.\n",
5586 print_ssid(ssid, network->ssid,
5587 network->ssid_len),
5588 network->bssid,
5589 jiffies_to_msecs(jiffies -
5590 network->last_scanned));
5591 return 0;
5592 }
5593
5594 if ((priv->config & CFG_STATIC_CHANNEL) &&
5595 (network->channel != priv->channel)) {
5596 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5597 "because of channel mismatch: %d != %d.\n",
5598 print_ssid(ssid, network->ssid,
5599 network->ssid_len),
5600 network->bssid,
5601 network->channel, priv->channel);
5602 return 0;
5603 }
5604
5605 /* Verify privacy compatibility */
5606 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5607 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5608 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5609 "because of privacy mismatch: %s != %s.\n",
5610 print_ssid(ssid, network->ssid,
5611 network->ssid_len),
5612 network->bssid,
5613 priv->
5614 capability & CAP_PRIVACY_ON ? "on" : "off",
5615 network->
5616 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5617 "off");
5618 return 0;
5619 }
5620
5621 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5622 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5623 "because of the same BSSID match: %pM"
5624 ".\n", print_ssid(ssid, network->ssid,
5625 network->ssid_len),
5626 network->bssid,
5627 priv->bssid);
5628 return 0;
5629 }
5630
5631 /* Filter out any incompatible freq / mode combinations */
5632 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5633 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5634 "because of invalid frequency/mode "
5635 "combination.\n",
5636 print_ssid(ssid, network->ssid,
5637 network->ssid_len),
5638 network->bssid);
5639 return 0;
5640 }
5641
5642 /* Ensure that the rates supported by the driver are compatible with
5643 * this AP, including verification of basic rates (mandatory) */
5644 if (!ipw_compatible_rates(priv, network, &rates)) {
5645 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5646 "because configured rate mask excludes "
5647 "AP mandatory rate.\n",
5648 print_ssid(ssid, network->ssid,
5649 network->ssid_len),
5650 network->bssid);
5651 return 0;
5652 }
5653
5654 if (rates.num_rates == 0) {
5655 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5656 "because of no compatible rates.\n",
5657 print_ssid(ssid, network->ssid,
5658 network->ssid_len),
5659 network->bssid);
5660 return 0;
5661 }
5662
5663 /* TODO: Perform any further minimal comparative tests. We do not
5664 * want to put too much policy logic here; intelligent scan selection
5665 * should occur within a generic IEEE 802.11 user space tool. */
5666
5667 /* Set up 'new' AP to this network */
5668 ipw_copy_rates(&match->rates, &rates);
5669 match->network = network;
5670 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5671 print_ssid(ssid, network->ssid, network->ssid_len),
5672 network->bssid);
5673
5674 return 1;
5675 }
5676
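/*
 * Work handler for IBSS merging: while associated in ad-hoc mode, walk
 * the scan results looking for a better cell of the same ESSID (via
 * ipw_find_adhoc_network); if one is found, drop the current network,
 * disassociate and make the new cell the association target.
 */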
5677 static void ipw_merge_adhoc_network(struct work_struct *work)
5678 {
5679 DECLARE_SSID_BUF(ssid);
5680 struct ipw_priv *priv =
5681 container_of(work, struct ipw_priv, merge_networks);
5682 struct libipw_network *network = NULL;
5683 struct ipw_network_match match = {
5684 .network = priv->assoc_network
5685 };
5686
5687 if ((priv->status & STATUS_ASSOCIATED) &&
5688 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5689 /* First pass through ROAM process -- look for a better
5690 * network */
5691 unsigned long flags;
5692
5693 spin_lock_irqsave(&priv->ieee->lock, flags);
5694 list_for_each_entry(network, &priv->ieee->network_list, list) {
5695 if (network != priv->assoc_network)
5696 ipw_find_adhoc_network(priv, &match, network,
5697 1);
5698 }
5699 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5700
5701 if (match.network == priv->assoc_network) {
5702 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5703 "merge to.\n");
5704 return;
5705 }
5706
5707 mutex_lock(&priv->mutex);
5708 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5709 IPW_DEBUG_MERGE("remove network %s\n",
5710 print_ssid(ssid, priv->essid,
5711 priv->essid_len));
5712 ipw_remove_current_network(priv);
5713 }
5714
5715 ipw_disassociate(priv);
5716 priv->assoc_network = match.network;
5717 mutex_unlock(&priv->mutex);
5718 return;
5719 }
5720 }
5721
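/*
 * Infrastructure/roaming counterpart of the ad-hoc match above: the
 * same chain of filters is applied, with the additions that a
 * stronger-signal candidate is preferred over the current match and
 * that networks targeted by an association attempt within the last
 * 3 seconds are skipped.  Returns 1 if 'network' becomes the new
 * best match.
 */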
5722 static int ipw_best_network(struct ipw_priv *priv,
5723 struct ipw_network_match *match,
5724 struct libipw_network *network, int roaming)
5725 {
5726 struct ipw_supported_rates rates;
5727 DECLARE_SSID_BUF(ssid);
5728
5729 /* Verify that this network's capability is compatible with the
5730 * current mode (AdHoc or Infrastructure) */
5731 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5732 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5733 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5734 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5735 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5736 "capability mismatch.\n",
5737 print_ssid(ssid, network->ssid,
5738 network->ssid_len),
5739 network->bssid);
5740 return 0;
5741 }
5742
5743 if (unlikely(roaming)) {
5744 /* If we are roaming, then check if this is a valid
5745 * network to try to roam to */
5746 if ((network->ssid_len != match->network->ssid_len) ||
5747 memcmp(network->ssid, match->network->ssid,
5748 network->ssid_len)) {
5749 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5750 "because of non-network ESSID.\n",
5751 print_ssid(ssid, network->ssid,
5752 network->ssid_len),
5753 network->bssid);
5754 return 0;
5755 }
5756 } else {
5757 /* If an ESSID has been configured then compare the broadcast
5758 * ESSID to ours */
5759 if ((priv->config & CFG_STATIC_ESSID) &&
5760 ((network->ssid_len != priv->essid_len) ||
5761 memcmp(network->ssid, priv->essid,
5762 min(network->ssid_len, priv->essid_len)))) {
5763 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5764 strncpy(escaped,
5765 print_ssid(ssid, network->ssid,
5766 network->ssid_len),
5767 sizeof(escaped));
5768 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5769 "because of ESSID mismatch: '%s'.\n",
5770 escaped, network->bssid,
5771 print_ssid(ssid, priv->essid,
5772 priv->essid_len));
5773 return 0;
5774 }
5775 }
5776
5777 /* If the currently matched network has a stronger signal than this
5778 * one, don't bother testing everything else. */
5779 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5780 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5781 strncpy(escaped,
5782 print_ssid(ssid, network->ssid, network->ssid_len),
5783 sizeof(escaped));
5784 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5785 "'%s (%pM)' has a stronger signal.\n",
5786 escaped, network->bssid,
5787 print_ssid(ssid, match->network->ssid,
5788 match->network->ssid_len),
5789 match->network->bssid);
5790 return 0;
5791 }
5792
5793 /* If this network has already had an association attempt within the
5794 * last 3 seconds, do not try and associate again... */
5795 if (network->last_associate &&
5796 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5797 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5798 "because of storming (%ums since last "
5799 "assoc attempt).\n",
5800 print_ssid(ssid, network->ssid,
5801 network->ssid_len),
5802 network->bssid,
5803 jiffies_to_msecs(jiffies -
5804 network->last_associate));
5805 return 0;
5806 }
5807
5808 /* Now go through and see if the requested network is valid... */
5809 if (priv->ieee->scan_age != 0 &&
5810 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5811 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5812 "because of age: %ums.\n",
5813 print_ssid(ssid, network->ssid,
5814 network->ssid_len),
5815 network->bssid,
5816 jiffies_to_msecs(jiffies -
5817 network->last_scanned));
5818 return 0;
5819 }
5820
5821 if ((priv->config & CFG_STATIC_CHANNEL) &&
5822 (network->channel != priv->channel)) {
5823 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5824 "because of channel mismatch: %d != %d.\n",
5825 print_ssid(ssid, network->ssid,
5826 network->ssid_len),
5827 network->bssid,
5828 network->channel, priv->channel);
5829 return 0;
5830 }
5831
5832 /* Verify privacy compatibility */
5833 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5834 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5835 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5836 "because of privacy mismatch: %s != %s.\n",
5837 print_ssid(ssid, network->ssid,
5838 network->ssid_len),
5839 network->bssid,
5840 priv->capability & CAP_PRIVACY_ON ? "on" :
5841 "off",
5842 network->capability &
5843 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5844 return 0;
5845 }
5846
5847 if ((priv->config & CFG_STATIC_BSSID) &&
5848 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5849 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5850 "because of BSSID mismatch: %pM.\n",
5851 print_ssid(ssid, network->ssid,
5852 network->ssid_len),
5853 network->bssid, priv->bssid);
5854 return 0;
5855 }
5856
5857 /* Filter out any incompatible freq / mode combinations */
5858 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5859 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5860 "because of invalid frequency/mode "
5861 "combination.\n",
5862 print_ssid(ssid, network->ssid,
5863 network->ssid_len),
5864 network->bssid);
5865 return 0;
5866 }
5867
5868 /* Filter out invalid channel in current GEO */
5869 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5870 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5871 "because of invalid channel in current GEO\n",
5872 print_ssid(ssid, network->ssid,
5873 network->ssid_len),
5874 network->bssid);
5875 return 0;
5876 }
5877
5878 /* Ensure that the rates supported by the driver are compatible with
5879 * this AP, including verification of basic rates (mandatory) */
5880 if (!ipw_compatible_rates(priv, network, &rates)) {
5881 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5882 "because configured rate mask excludes "
5883 "AP mandatory rate.\n",
5884 print_ssid(ssid, network->ssid,
5885 network->ssid_len),
5886 network->bssid);
5887 return 0;
5888 }
5889
5890 if (rates.num_rates == 0) {
5891 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5892 "because of no compatible rates.\n",
5893 print_ssid(ssid, network->ssid,
5894 network->ssid_len),
5895 network->bssid);
5896 return 0;
5897 }
5898
5899 /* TODO: Perform any further minimal comparative tests. We do not
5900 * want to put too much policy logic here; intelligent scan selection
5901 * should occur within a generic IEEE 802.11 user space tool. */
5902
5903 /* Set up 'new' AP to this network */
5904 ipw_copy_rates(&match->rates, &rates);
5905 match->network = network;
5906
5907 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5908 print_ssid(ssid, network->ssid, network->ssid_len),
5909 network->bssid);
5910
5911 return 1;
5912 }
5913
5914 static void ipw_adhoc_create(struct ipw_priv *priv,
5915 struct libipw_network *network)
5916 {
5917 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5918 int i;
5919
5920 /*
5921 * For the purposes of scanning, we can set our wireless mode
5922 * to trigger scans across combinations of bands, but when it
5923 * comes to creating a new ad-hoc network, we have to tell the FW
5924 * exactly which band to use.
5925 *
5926 * We also have the possibility of an invalid channel for the
5927 * chosen band. Attempting to create a new ad-hoc network
5928 * with an invalid channel for wireless mode will trigger a
5929 * FW fatal error.
5930 *
5931 */
5932 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5933 case LIBIPW_52GHZ_BAND:
5934 network->mode = IEEE_A;
5935 i = libipw_channel_to_index(priv->ieee, priv->channel);
5936 BUG_ON(i == -1);
5937 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5938 IPW_WARNING("Overriding invalid channel\n");
5939 priv->channel = geo->a[0].channel;
5940 }
5941 break;
5942
5943 case LIBIPW_24GHZ_BAND:
5944 if (priv->ieee->mode & IEEE_G)
5945 network->mode = IEEE_G;
5946 else
5947 network->mode = IEEE_B;
5948 i = libipw_channel_to_index(priv->ieee, priv->channel);
5949 BUG_ON(i == -1);
5950 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5951 IPW_WARNING("Overriding invalid channel\n");
5952 priv->channel = geo->bg[0].channel;
5953 }
5954 break;
5955
5956 default:
5957 IPW_WARNING("Overriding invalid channel\n");
5958 if (priv->ieee->mode & IEEE_A) {
5959 network->mode = IEEE_A;
5960 priv->channel = geo->a[0].channel;
5961 } else if (priv->ieee->mode & IEEE_G) {
5962 network->mode = IEEE_G;
5963 priv->channel = geo->bg[0].channel;
5964 } else {
5965 network->mode = IEEE_B;
5966 priv->channel = geo->bg[0].channel;
5967 }
5968 break;
5969 }
5970
5971 network->channel = priv->channel;
5972 priv->config |= CFG_ADHOC_PERSIST;
5973 ipw_create_bssid(priv, network->bssid);
5974 network->ssid_len = priv->essid_len;
5975 memcpy(network->ssid, priv->essid, priv->essid_len);
5976 memset(&network->stats, 0, sizeof(network->stats));
5977 network->capability = WLAN_CAPABILITY_IBSS;
5978 if (!(priv->config & CFG_PREAMBLE_LONG))
5979 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5980 if (priv->capability & CAP_PRIVACY_ON)
5981 network->capability |= WLAN_CAPABILITY_PRIVACY;
5982 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5983 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5984 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5985 memcpy(network->rates_ex,
5986 &priv->rates.supported_rates[network->rates_len],
5987 network->rates_ex_len);
5988 network->last_scanned = 0;
5989 network->flags = 0;
5990 network->last_associate = 0;
5991 network->time_stamp[0] = 0;
5992 network->time_stamp[1] = 0;
5993 network->beacon_interval = 100; /* Default */
5994 network->listen_interval = 10; /* Default */
5995 network->atim_window = 0; /* Default */
5996 network->wpa_ie_len = 0;
5997 network->rsn_ie_len = 0;
5998 }
5999
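/*
 * Program one transmit (TGi) key into the firmware.  Nothing is sent
 * unless the corresponding key slot is actually populated in the
 * software security state.
 */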
6000 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
6001 {
6002 struct ipw_tgi_tx_key key;
6003
6004 if (!(priv->ieee->sec.flags & (1 << index)))
6005 return;
6006
6007 key.key_id = index;
6008 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6009 key.security_type = type;
6010 key.station_index = 0; /* always 0 for BSS */
6011 key.flags = 0;
6012 /* 0 for new key; previous value of counter (after fatal error) */
6013 key.tx_counter[0] = cpu_to_le32(0);
6014 key.tx_counter[1] = cpu_to_le32(0);
6015
6016 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6017 }
6018
6019 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6020 {
6021 struct ipw_wep_key key;
6022 int i;
6023
6024 key.cmd_id = DINO_CMD_WEP_KEY;
6025 key.seq_num = 0;
6026
6027 /* Note: AES keys cannot be set multiple times.
6028 * Only set them the first time. */
6029 for (i = 0; i < 4; i++) {
6030 key.key_index = i | type;
6031 if (!(priv->ieee->sec.flags & (1 << i))) {
6032 key.key_size = 0;
6033 continue;
6034 }
6035
6036 key.key_size = priv->ieee->sec.key_sizes[i];
6037 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6038
6039 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6040 }
6041 }
6042
6043 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6044 {
6045 if (priv->ieee->host_encrypt)
6046 return;
6047
6048 switch (level) {
6049 case SEC_LEVEL_3:
6050 priv->sys_config.disable_unicast_decryption = 0;
6051 priv->ieee->host_decrypt = 0;
6052 break;
6053 case SEC_LEVEL_2:
6054 priv->sys_config.disable_unicast_decryption = 1;
6055 priv->ieee->host_decrypt = 1;
6056 break;
6057 case SEC_LEVEL_1:
6058 priv->sys_config.disable_unicast_decryption = 0;
6059 priv->ieee->host_decrypt = 0;
6060 break;
6061 case SEC_LEVEL_0:
6062 priv->sys_config.disable_unicast_decryption = 1;
6063 break;
6064 default:
6065 break;
6066 }
6067 }
6068
6069 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6070 {
6071 if (priv->ieee->host_encrypt)
6072 return;
6073
6074 switch (level) {
6075 case SEC_LEVEL_3:
6076 priv->sys_config.disable_multicast_decryption = 0;
6077 break;
6078 case SEC_LEVEL_2:
6079 priv->sys_config.disable_multicast_decryption = 1;
6080 break;
6081 case SEC_LEVEL_1:
6082 priv->sys_config.disable_multicast_decryption = 0;
6083 break;
6084 case SEC_LEVEL_0:
6085 priv->sys_config.disable_multicast_decryption = 1;
6086 break;
6087 default:
6088 break;
6089 }
6090 }
6091
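/*
 * Push the configured keys down to the firmware according to the
 * negotiated security level: SEC_LEVEL_3 (CCMP) programs the active
 * TGi TX key and, when multicast decryption is not done on the host,
 * also downloads the key slots with the CCM type; SEC_LEVEL_2 (TKIP)
 * programs only the TGi TX key (the MIC still has to be handled on
 * the host); SEC_LEVEL_1 downloads WEP keys and updates the hardware
 * decrypt settings.
 */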
6092 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6093 {
6094 switch (priv->ieee->sec.level) {
6095 case SEC_LEVEL_3:
6096 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6097 ipw_send_tgi_tx_key(priv,
6098 DCT_FLAG_EXT_SECURITY_CCM,
6099 priv->ieee->sec.active_key);
6100
6101 if (!priv->ieee->host_mc_decrypt)
6102 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6103 break;
6104 case SEC_LEVEL_2:
6105 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6106 ipw_send_tgi_tx_key(priv,
6107 DCT_FLAG_EXT_SECURITY_TKIP,
6108 priv->ieee->sec.active_key);
6109 break;
6110 case SEC_LEVEL_1:
6111 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6112 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6113 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6114 break;
6115 case SEC_LEVEL_0:
6116 default:
6117 break;
6118 }
6119 }
6120
6121 static void ipw_adhoc_check(void *data)
6122 {
6123 struct ipw_priv *priv = data;
6124
6125 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6126 !(priv->config & CFG_ADHOC_PERSIST)) {
6127 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6128 IPW_DL_STATE | IPW_DL_ASSOC,
6129 "Missed beacon: %d - disassociate\n",
6130 priv->missed_adhoc_beacons);
6131 ipw_remove_current_network(priv);
6132 ipw_disassociate(priv);
6133 return;
6134 }
6135
6136 schedule_delayed_work(&priv->adhoc_check,
6137 le16_to_cpu(priv->assoc_request.beacon_interval));
6138 }
6139
6140 static void ipw_bg_adhoc_check(struct work_struct *work)
6141 {
6142 struct ipw_priv *priv =
6143 container_of(work, struct ipw_priv, adhoc_check.work);
6144 mutex_lock(&priv->mutex);
6145 ipw_adhoc_check(priv);
6146 mutex_unlock(&priv->mutex);
6147 }
6148
6149 static void ipw_debug_config(struct ipw_priv *priv)
6150 {
6151 DECLARE_SSID_BUF(ssid);
6152 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6153 "[CFG 0x%08X]\n", priv->config);
6154 if (priv->config & CFG_STATIC_CHANNEL)
6155 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6156 else
6157 IPW_DEBUG_INFO("Channel unlocked.\n");
6158 if (priv->config & CFG_STATIC_ESSID)
6159 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6160 print_ssid(ssid, priv->essid, priv->essid_len));
6161 else
6162 IPW_DEBUG_INFO("ESSID unlocked.\n");
6163 if (priv->config & CFG_STATIC_BSSID)
6164 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6165 else
6166 IPW_DEBUG_INFO("BSSID unlocked.\n");
6167 if (priv->capability & CAP_PRIVACY_ON)
6168 IPW_DEBUG_INFO("PRIVACY on\n");
6169 else
6170 IPW_DEBUG_INFO("PRIVACY off\n");
6171 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6172 }
6173
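/*
 * Translate the user rate mask into the firmware's fixed-rate format
 * for the current band and write it to the address read back from
 * IPW_MEM_FIXED_OVERRIDE.  A mask that is invalid for the band falls
 * back to 0, i.e. no fixed rate.
 */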
6174 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6175 {
6176 /* TODO: Verify that this works... */
6177 struct ipw_fixed_rate fr;
6178 u32 reg;
6179 u16 mask = 0;
6180 u16 new_tx_rates = priv->rates_mask;
6181
6182 /* Identify 'current FW band' and match it with the fixed
6183 * Tx rates */
6184
6185 switch (priv->ieee->freq_band) {
6186 case LIBIPW_52GHZ_BAND: /* A only */
6187 /* IEEE_A */
6188 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6189 /* Invalid fixed rate mask */
6190 IPW_DEBUG_WX
6191 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6192 new_tx_rates = 0;
6193 break;
6194 }
6195
6196 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6197 break;
6198
6199 default: /* 2.4Ghz or Mixed */
6200 /* IEEE_B */
6201 if (mode == IEEE_B) {
6202 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6203 /* Invalid fixed rate mask */
6204 IPW_DEBUG_WX
6205 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6206 new_tx_rates = 0;
6207 }
6208 break;
6209 }
6210
6211 /* IEEE_G */
6212 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6213 LIBIPW_OFDM_RATES_MASK)) {
6214 /* Invalid fixed rate mask */
6215 IPW_DEBUG_WX
6216 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6217 new_tx_rates = 0;
6218 break;
6219 }
6220
6221 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6222 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6223 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6224 }
6225
6226 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6227 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6228 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6229 }
6230
6231 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6232 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6233 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6234 }
6235
6236 new_tx_rates |= mask;
6237 break;
6238 }
6239
6240 fr.tx_rates = cpu_to_le16(new_tx_rates);
6241
6242 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6243 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6244 }
6245
6246 static void ipw_abort_scan(struct ipw_priv *priv)
6247 {
6248 int err;
6249
6250 if (priv->status & STATUS_SCAN_ABORTING) {
6251 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6252 return;
6253 }
6254 priv->status |= STATUS_SCAN_ABORTING;
6255
6256 err = ipw_send_scan_abort(priv);
6257 if (err)
6258 IPW_DEBUG_HC("Request to abort scan failed.\n");
6259 }
6260
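/*
 * Fill scan->channels_list for the scan request.  The list is built
 * per band: the entry at 'start' is skipped and later overwritten with
 * a band header byte, (IPW_A_MODE or IPW_B_MODE) << 6 OR'd with the
 * number of channels that follow, while the subsequent entries hold
 * the channel numbers themselves.  The currently associated channel is
 * skipped, and passive-only channels are forced to a passive scan
 * type.
 */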
6261 static void ipw_add_scan_channels(struct ipw_priv *priv,
6262 struct ipw_scan_request_ext *scan,
6263 int scan_type)
6264 {
6265 int channel_index = 0;
6266 const struct libipw_geo *geo;
6267 int i;
6268
6269 geo = libipw_get_geo(priv->ieee);
6270
6271 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6272 int start = channel_index;
6273 for (i = 0; i < geo->a_channels; i++) {
6274 if ((priv->status & STATUS_ASSOCIATED) &&
6275 geo->a[i].channel == priv->channel)
6276 continue;
6277 channel_index++;
6278 scan->channels_list[channel_index] = geo->a[i].channel;
6279 ipw_set_scan_type(scan, channel_index,
6280 geo->a[i].
6281 flags & LIBIPW_CH_PASSIVE_ONLY ?
6282 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6283 scan_type);
6284 }
6285
6286 if (start != channel_index) {
6287 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6288 (channel_index - start);
6289 channel_index++;
6290 }
6291 }
6292
6293 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6294 int start = channel_index;
6295 if (priv->config & CFG_SPEED_SCAN) {
6296 int index;
6297 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6298 /* nop out the list */
6299 [0] = 0
6300 };
6301
6302 u8 channel;
6303 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6304 channel =
6305 priv->speed_scan[priv->speed_scan_pos];
6306 if (channel == 0) {
6307 priv->speed_scan_pos = 0;
6308 channel = priv->speed_scan[0];
6309 }
6310 if ((priv->status & STATUS_ASSOCIATED) &&
6311 channel == priv->channel) {
6312 priv->speed_scan_pos++;
6313 continue;
6314 }
6315
6316 /* If this channel has already been
6317 * added in scan, break from loop
6318 * and this will be the first channel
6319 * in the next scan.
6320 */
6321 if (channels[channel - 1] != 0)
6322 break;
6323
6324 channels[channel - 1] = 1;
6325 priv->speed_scan_pos++;
6326 channel_index++;
6327 scan->channels_list[channel_index] = channel;
6328 index =
6329 libipw_channel_to_index(priv->ieee, channel);
6330 ipw_set_scan_type(scan, channel_index,
6331 geo->bg[index].
6332 flags &
6333 LIBIPW_CH_PASSIVE_ONLY ?
6334 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6335 : scan_type);
6336 }
6337 } else {
6338 for (i = 0; i < geo->bg_channels; i++) {
6339 if ((priv->status & STATUS_ASSOCIATED) &&
6340 geo->bg[i].channel == priv->channel)
6341 continue;
6342 channel_index++;
6343 scan->channels_list[channel_index] =
6344 geo->bg[i].channel;
6345 ipw_set_scan_type(scan, channel_index,
6346 geo->bg[i].
6347 flags &
6348 LIBIPW_CH_PASSIVE_ONLY ?
6349 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6350 : scan_type);
6351 }
6352 }
6353
6354 if (start != channel_index) {
6355 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6356 (channel_index - start);
6357 }
6358 }
6359 }
6360
6361 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6362 {
6363 /* staying on passive channels longer than the DTIM interval during a
6364 * scan, while associated, causes the firmware to cancel the scan
6365 * without notification. Hence, don't stay on passive channels longer
6366 * than the beacon interval.
6367 */
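/* For example, with a beacon interval of 100 this yields a dwell of
 * 90; when not associated (or the interval is 10 or less) the default
 * of 120 is used. */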
6368 if (priv->status & STATUS_ASSOCIATED
6369 && priv->assoc_network->beacon_interval > 10)
6370 return priv->assoc_network->beacon_interval - 10;
6371 else
6372 return 120;
6373 }
6374
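/*
 * Build and issue a scan request.  If a scan is already in progress,
 * an abort is pending, or the radio is RF-killed, the request is not
 * dropped but recorded as STATUS_SCAN_PENDING (or
 * STATUS_DIRECT_SCAN_PENDING for directed scans), presumably so it can
 * be retried once the current operation completes.  Passive scans use
 * only the passive dwell; active scans set broadcast/direct dwell
 * times and, outside monitor mode, may become a directed scan for the
 * requested, configured or roaming ESSID.
 */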
6375 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6376 {
6377 struct ipw_scan_request_ext scan;
6378 int err = 0, scan_type;
6379
6380 if (!(priv->status & STATUS_INIT) ||
6381 (priv->status & STATUS_EXIT_PENDING))
6382 return 0;
6383
6384 mutex_lock(&priv->mutex);
6385
6386 if (direct && (priv->direct_scan_ssid_len == 0)) {
6387 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6388 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6389 goto done;
6390 }
6391
6392 if (priv->status & STATUS_SCANNING) {
6393 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6394 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6395 STATUS_SCAN_PENDING;
6396 goto done;
6397 }
6398
6399 if (!(priv->status & STATUS_SCAN_FORCED) &&
6400 priv->status & STATUS_SCAN_ABORTING) {
6401 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6402 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6403 STATUS_SCAN_PENDING;
6404 goto done;
6405 }
6406
6407 if (priv->status & STATUS_RF_KILL_MASK) {
6408 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6409 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6410 STATUS_SCAN_PENDING;
6411 goto done;
6412 }
6413
6414 memset(&scan, 0, sizeof(scan));
6415 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6416
6417 if (type == IW_SCAN_TYPE_PASSIVE) {
6418 IPW_DEBUG_WX("use passive scanning\n");
6419 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6420 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6421 cpu_to_le16(ipw_passive_dwell_time(priv));
6422 ipw_add_scan_channels(priv, &scan, scan_type);
6423 goto send_request;
6424 }
6425
6426 /* Use active scan by default. */
6427 if (priv->config & CFG_SPEED_SCAN)
6428 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6429 cpu_to_le16(30);
6430 else
6431 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6432 cpu_to_le16(20);
6433
6434 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6435 cpu_to_le16(20);
6436
6437 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6438 cpu_to_le16(ipw_passive_dwell_time(priv));
6439 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6440
6441 #ifdef CONFIG_IPW2200_MONITOR
6442 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6443 u8 channel;
6444 u8 band = 0;
6445
6446 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6447 case LIBIPW_52GHZ_BAND:
6448 band = (u8) (IPW_A_MODE << 6) | 1;
6449 channel = priv->channel;
6450 break;
6451
6452 case LIBIPW_24GHZ_BAND:
6453 band = (u8) (IPW_B_MODE << 6) | 1;
6454 channel = priv->channel;
6455 break;
6456
6457 default:
6458 band = (u8) (IPW_B_MODE << 6) | 1;
6459 channel = 9;
6460 break;
6461 }
6462
6463 scan.channels_list[0] = band;
6464 scan.channels_list[1] = channel;
6465 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6466
6467 /* NOTE: The card will sit on this channel for this time
6468 * period. Scan aborts are timing sensitive and frequently
6469 * result in firmware restarts. As such, it is best to
6470 * set a small dwell_time here and just keep re-issuing
6471 * scans. Otherwise fast channel hopping will not actually
6472 * hop channels.
6473 *
6474 * TODO: Move SPEED SCAN support to all modes and bands */
6475 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6476 cpu_to_le16(2000);
6477 } else {
6478 #endif /* CONFIG_IPW2200_MONITOR */
6479 /* Honor direct scans first, otherwise if we are roaming make
6480 * this a direct scan for the current network. Finally,
6481 * ensure that every other scan is a fast channel hop scan */
6482 if (direct) {
6483 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6484 priv->direct_scan_ssid_len);
6485 if (err) {
6486 IPW_DEBUG_HC("Attempt to send SSID command "
6487 "failed\n");
6488 goto done;
6489 }
6490
6491 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6492 } else if ((priv->status & STATUS_ROAMING)
6493 || (!(priv->status & STATUS_ASSOCIATED)
6494 && (priv->config & CFG_STATIC_ESSID)
6495 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6496 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6497 if (err) {
6498 IPW_DEBUG_HC("Attempt to send SSID command "
6499 "failed.\n");
6500 goto done;
6501 }
6502
6503 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6504 } else
6505 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6506
6507 ipw_add_scan_channels(priv, &scan, scan_type);
6508 #ifdef CONFIG_IPW2200_MONITOR
6509 }
6510 #endif
6511
6512 send_request:
6513 err = ipw_send_scan_request_ext(priv, &scan);
6514 if (err) {
6515 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6516 goto done;
6517 }
6518
6519 priv->status |= STATUS_SCANNING;
6520 if (direct) {
6521 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6522 priv->direct_scan_ssid_len = 0;
6523 } else
6524 priv->status &= ~STATUS_SCAN_PENDING;
6525
6526 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6527 done:
6528 mutex_unlock(&priv->mutex);
6529 return err;
6530 }
6531
6532 static void ipw_request_passive_scan(struct work_struct *work)
6533 {
6534 struct ipw_priv *priv =
6535 container_of(work, struct ipw_priv, request_passive_scan.work);
6536 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6537 }
6538
6539 static void ipw_request_scan(struct work_struct *work)
6540 {
6541 struct ipw_priv *priv =
6542 container_of(work, struct ipw_priv, request_scan.work);
6543 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6544 }
6545
6546 static void ipw_request_direct_scan(struct work_struct *work)
6547 {
6548 struct ipw_priv *priv =
6549 container_of(work, struct ipw_priv, request_direct_scan.work);
6550 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6551 }
6552
6553 static void ipw_bg_abort_scan(struct work_struct *work)
6554 {
6555 struct ipw_priv *priv =
6556 container_of(work, struct ipw_priv, abort_scan);
6557 mutex_lock(&priv->mutex);
6558 ipw_abort_scan(priv);
6559 mutex_unlock(&priv->mutex);
6560 }
6561
6562 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6563 {
6564 /* This is called when wpa_supplicant loads and closes the driver
6565 * interface. */
6566 priv->ieee->wpa_enabled = value;
6567 return 0;
6568 }
6569
6570 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6571 {
6572 struct libipw_device *ieee = priv->ieee;
6573 struct libipw_security sec = {
6574 .flags = SEC_AUTH_MODE,
6575 };
6576 int ret = 0;
6577
6578 if (value & IW_AUTH_ALG_SHARED_KEY) {
6579 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6580 ieee->open_wep = 0;
6581 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6582 sec.auth_mode = WLAN_AUTH_OPEN;
6583 ieee->open_wep = 1;
6584 } else if (value & IW_AUTH_ALG_LEAP) {
6585 sec.auth_mode = WLAN_AUTH_LEAP;
6586 ieee->open_wep = 1;
6587 } else
6588 return -EINVAL;
6589
6590 if (ieee->set_security)
6591 ieee->set_security(ieee->dev, &sec);
6592 else
6593 ret = -EOPNOTSUPP;
6594
6595 return ret;
6596 }
6597
6598 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6599 int wpa_ie_len)
6600 {
6601 /* make sure WPA is enabled */
6602 ipw_wpa_enable(priv, 1);
6603 }
6604
6605 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6606 char *capabilities, int length)
6607 {
6608 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6609
6610 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6611 capabilities);
6612 }
6613
6614 /*
6615 * WE-18 support
6616 */
6617
6618 /* SIOCSIWGENIE */
6619 static int ipw_wx_set_genie(struct net_device *dev,
6620 struct iw_request_info *info,
6621 union iwreq_data *wrqu, char *extra)
6622 {
6623 struct ipw_priv *priv = libipw_priv(dev);
6624 struct libipw_device *ieee = priv->ieee;
6625 u8 *buf;
6626 int err = 0;
6627
6628 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6629 (wrqu->data.length && extra == NULL))
6630 return -EINVAL;
6631
6632 if (wrqu->data.length) {
6633 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6634 if (buf == NULL) {
6635 err = -ENOMEM;
6636 goto out;
6637 }
6638
6639 kfree(ieee->wpa_ie);
6640 ieee->wpa_ie = buf;
6641 ieee->wpa_ie_len = wrqu->data.length;
6642 } else {
6643 kfree(ieee->wpa_ie);
6644 ieee->wpa_ie = NULL;
6645 ieee->wpa_ie_len = 0;
6646 }
6647
6648 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6649 out:
6650 return err;
6651 }
6652
6653 /* SIOCGIWGENIE */
6654 static int ipw_wx_get_genie(struct net_device *dev,
6655 struct iw_request_info *info,
6656 union iwreq_data *wrqu, char *extra)
6657 {
6658 struct ipw_priv *priv = libipw_priv(dev);
6659 struct libipw_device *ieee = priv->ieee;
6660 int err = 0;
6661
6662 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6663 wrqu->data.length = 0;
6664 goto out;
6665 }
6666
6667 if (wrqu->data.length < ieee->wpa_ie_len) {
6668 err = -E2BIG;
6669 goto out;
6670 }
6671
6672 wrqu->data.length = ieee->wpa_ie_len;
6673 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6674
6675 out:
6676 return err;
6677 }
6678
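/* Map a WEXT cipher selector onto the driver's internal SEC_LEVEL_* scale. */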
6679 static int wext_cipher2level(int cipher)
6680 {
6681 switch (cipher) {
6682 case IW_AUTH_CIPHER_NONE:
6683 return SEC_LEVEL_0;
6684 case IW_AUTH_CIPHER_WEP40:
6685 case IW_AUTH_CIPHER_WEP104:
6686 return SEC_LEVEL_1;
6687 case IW_AUTH_CIPHER_TKIP:
6688 return SEC_LEVEL_2;
6689 case IW_AUTH_CIPHER_CCMP:
6690 return SEC_LEVEL_3;
6691 default:
6692 return -1;
6693 }
6694 }
6695
6696 /* SIOCSIWAUTH */
6697 static int ipw_wx_set_auth(struct net_device *dev,
6698 struct iw_request_info *info,
6699 union iwreq_data *wrqu, char *extra)
6700 {
6701 struct ipw_priv *priv = libipw_priv(dev);
6702 struct libipw_device *ieee = priv->ieee;
6703 struct iw_param *param = &wrqu->param;
6704 struct lib80211_crypt_data *crypt;
6705 unsigned long flags;
6706 int ret = 0;
6707
6708 switch (param->flags & IW_AUTH_INDEX) {
6709 case IW_AUTH_WPA_VERSION:
6710 break;
6711 case IW_AUTH_CIPHER_PAIRWISE:
6712 ipw_set_hw_decrypt_unicast(priv,
6713 wext_cipher2level(param->value));
6714 break;
6715 case IW_AUTH_CIPHER_GROUP:
6716 ipw_set_hw_decrypt_multicast(priv,
6717 wext_cipher2level(param->value));
6718 break;
6719 case IW_AUTH_KEY_MGMT:
6720 /*
6721 * ipw2200 does not use these parameters
6722 */
6723 break;
6724
6725 case IW_AUTH_TKIP_COUNTERMEASURES:
6726 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6727 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6728 break;
6729
6730 flags = crypt->ops->get_flags(crypt->priv);
6731
6732 if (param->value)
6733 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6734 else
6735 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6736
6737 crypt->ops->set_flags(flags, crypt->priv);
6738
6739 break;
6740
6741 case IW_AUTH_DROP_UNENCRYPTED:{
6742 /* HACK:
6743 *
6744 * wpa_supplicant calls set_wpa_enabled when the driver
6745 * is loaded and unloaded, regardless of whether WPA is being
6746 * used. No other calls are made that can be used to
6747 * determine whether encryption will be used before an
6748 * association is expected. If encryption is not being
6749 * used, drop_unencrypted is set to false, else true -- we
6750 * can use this to determine if the CAP_PRIVACY_ON bit should
6751 * be set.
6752 */
6753 struct libipw_security sec = {
6754 .flags = SEC_ENABLED,
6755 .enabled = param->value,
6756 };
6757 priv->ieee->drop_unencrypted = param->value;
6758 /* We only change SEC_LEVEL for open mode. Others
6759 * are set by ipw_wpa_set_encryption.
6760 */
6761 if (!param->value) {
6762 sec.flags |= SEC_LEVEL;
6763 sec.level = SEC_LEVEL_0;
6764 } else {
6765 sec.flags |= SEC_LEVEL;
6766 sec.level = SEC_LEVEL_1;
6767 }
6768 if (priv->ieee->set_security)
6769 priv->ieee->set_security(priv->ieee->dev, &sec);
6770 break;
6771 }
6772
6773 case IW_AUTH_80211_AUTH_ALG:
6774 ret = ipw_wpa_set_auth_algs(priv, param->value);
6775 break;
6776
6777 case IW_AUTH_WPA_ENABLED:
6778 ret = ipw_wpa_enable(priv, param->value);
6779 ipw_disassociate(priv);
6780 break;
6781
6782 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6783 ieee->ieee802_1x = param->value;
6784 break;
6785
6786 case IW_AUTH_PRIVACY_INVOKED:
6787 ieee->privacy_invoked = param->value;
6788 break;
6789
6790 default:
6791 return -EOPNOTSUPP;
6792 }
6793 return ret;
6794 }
6795
6796 /* SIOCGIWAUTH */
6797 static int ipw_wx_get_auth(struct net_device *dev,
6798 struct iw_request_info *info,
6799 union iwreq_data *wrqu, char *extra)
6800 {
6801 struct ipw_priv *priv = libipw_priv(dev);
6802 struct libipw_device *ieee = priv->ieee;
6803 struct lib80211_crypt_data *crypt;
6804 struct iw_param *param = &wrqu->param;
6805 int ret = 0;
6806
6807 switch (param->flags & IW_AUTH_INDEX) {
6808 case IW_AUTH_WPA_VERSION:
6809 case IW_AUTH_CIPHER_PAIRWISE:
6810 case IW_AUTH_CIPHER_GROUP:
6811 case IW_AUTH_KEY_MGMT:
6812 /*
6813 * wpa_supplicant will control these internally
6814 */
6815 ret = -EOPNOTSUPP;
6816 break;
6817
6818 case IW_AUTH_TKIP_COUNTERMEASURES:
6819 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6820 if (!crypt || !crypt->ops->get_flags)
6821 break;
6822
6823 param->value = (crypt->ops->get_flags(crypt->priv) &
6824 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6825
6826 break;
6827
6828 case IW_AUTH_DROP_UNENCRYPTED:
6829 param->value = ieee->drop_unencrypted;
6830 break;
6831
6832 case IW_AUTH_80211_AUTH_ALG:
6833 param->value = ieee->sec.auth_mode;
6834 break;
6835
6836 case IW_AUTH_WPA_ENABLED:
6837 param->value = ieee->wpa_enabled;
6838 break;
6839
6840 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6841 param->value = ieee->ieee802_1x;
6842 break;
6843
6844 case IW_AUTH_ROAMING_CONTROL:
6845 case IW_AUTH_PRIVACY_INVOKED:
6846 param->value = ieee->privacy_invoked;
6847 break;
6848
6849 default:
6850 return -EOPNOTSUPP;
6851 }
6852 return 0;
6853 }
6854
6855 /* SIOCSIWENCODEEXT */
6856 static int ipw_wx_set_encodeext(struct net_device *dev,
6857 struct iw_request_info *info,
6858 union iwreq_data *wrqu, char *extra)
6859 {
6860 struct ipw_priv *priv = libipw_priv(dev);
6861 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6862
6863 if (hwcrypto) {
6864 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6865 /* IPW HW can't build TKIP MIC,
6866 host decryption still needed */
6867 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6868 priv->ieee->host_mc_decrypt = 1;
6869 else {
6870 priv->ieee->host_encrypt = 0;
6871 priv->ieee->host_encrypt_msdu = 1;
6872 priv->ieee->host_decrypt = 1;
6873 }
6874 } else {
6875 priv->ieee->host_encrypt = 0;
6876 priv->ieee->host_encrypt_msdu = 0;
6877 priv->ieee->host_decrypt = 0;
6878 priv->ieee->host_mc_decrypt = 0;
6879 }
6880 }
6881
6882 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6883 }
6884
6885 /* SIOCGIWENCODEEXT */
6886 static int ipw_wx_get_encodeext(struct net_device *dev,
6887 struct iw_request_info *info,
6888 union iwreq_data *wrqu, char *extra)
6889 {
6890 struct ipw_priv *priv = libipw_priv(dev);
6891 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6892 }
6893
6894 /* SIOCSIWMLME */
6895 static int ipw_wx_set_mlme(struct net_device *dev,
6896 struct iw_request_info *info,
6897 union iwreq_data *wrqu, char *extra)
6898 {
6899 struct ipw_priv *priv = libipw_priv(dev);
6900 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6901 __le16 reason;
6902
6903 reason = cpu_to_le16(mlme->reason_code);
6904
6905 switch (mlme->cmd) {
6906 case IW_MLME_DEAUTH:
6907 /* silently ignore */
6908 break;
6909
6910 case IW_MLME_DISASSOC:
6911 ipw_disassociate(priv);
6912 break;
6913
6914 default:
6915 return -EOPNOTSUPP;
6916 }
6917 return 0;
6918 }
6919
6920 #ifdef CONFIG_IPW2200_QOS
6921
6922 /* QoS */
6923 /*
6924 * Get the modulation type of the current network or
6925 * the card's current mode
6926 */
6927 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6928 {
6929 u8 mode = 0;
6930
6931 if (priv->status & STATUS_ASSOCIATED) {
6932 unsigned long flags;
6933
6934 spin_lock_irqsave(&priv->ieee->lock, flags);
6935 mode = priv->assoc_network->mode;
6936 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6937 } else {
6938 mode = priv->ieee->mode;
6939 }
6940 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6941 return mode;
6942 }
6943
6944 /*
6945 * Handle management frame beacon and probe response
6946 */
6947 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6948 int active_network,
6949 struct libipw_network *network)
6950 {
6951 u32 size = sizeof(struct libipw_qos_parameters);
6952
6953 if (network->capability & WLAN_CAPABILITY_IBSS)
6954 network->qos_data.active = network->qos_data.supported;
6955
6956 if (network->flags & NETWORK_HAS_QOS_MASK) {
6957 if (active_network &&
6958 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6959 network->qos_data.active = network->qos_data.supported;
6960
6961 if ((network->qos_data.active == 1) && (active_network == 1) &&
6962 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6963 (network->qos_data.old_param_count !=
6964 network->qos_data.param_count)) {
6965 network->qos_data.old_param_count =
6966 network->qos_data.param_count;
6967 schedule_work(&priv->qos_activate);
6968 IPW_DEBUG_QOS("QoS parameters change call "
6969 "qos_activate\n");
6970 }
6971 } else {
6972 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6973 memcpy(&network->qos_data.parameters,
6974 &def_parameters_CCK, size);
6975 else
6976 memcpy(&network->qos_data.parameters,
6977 &def_parameters_OFDM, size);
6978
6979 if ((network->qos_data.active == 1) && (active_network == 1)) {
6980 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6981 schedule_work(&priv->qos_activate);
6982 }
6983
6984 network->qos_data.active = 0;
6985 network->qos_data.supported = 0;
6986 }
6987 if ((priv->status & STATUS_ASSOCIATED) &&
6988 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6989 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6990 if (network->capability & WLAN_CAPABILITY_IBSS)
6991 if ((network->ssid_len ==
6992 priv->assoc_network->ssid_len) &&
6993 !memcmp(network->ssid,
6994 priv->assoc_network->ssid,
6995 network->ssid_len)) {
6996 schedule_work(&priv->merge_networks);
6997 }
6998 }
6999
7000 return 0;
7001 }
7002
7003 /*
7004 * This function sets up the firmware to support QoS. It sends
7005 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
7006 */
7007 static int ipw_qos_activate(struct ipw_priv *priv,
7008 struct libipw_qos_data *qos_network_data)
7009 {
7010 int err;
7011 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7012 struct libipw_qos_parameters *active_one = NULL;
7013 u32 size = sizeof(struct libipw_qos_parameters);
7014 u32 burst_duration;
7015 int i;
7016 u8 type;
7017
7018 type = ipw_qos_current_mode(priv);
7019
7020 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7021 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7022 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7023 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7024
7025 if (qos_network_data == NULL) {
7026 if (type == IEEE_B) {
7027 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7028 active_one = &def_parameters_CCK;
7029 } else
7030 active_one = &def_parameters_OFDM;
7031
7032 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7033 burst_duration = ipw_qos_get_burst_duration(priv);
7034 for (i = 0; i < QOS_QUEUE_NUM; i++)
7035 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7036 cpu_to_le16(burst_duration);
7037 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7038 if (type == IEEE_B) {
7039 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
7040 type);
7041 if (priv->qos_data.qos_enable == 0)
7042 active_one = &def_parameters_CCK;
7043 else
7044 active_one = priv->qos_data.def_qos_parm_CCK;
7045 } else {
7046 if (priv->qos_data.qos_enable == 0)
7047 active_one = &def_parameters_OFDM;
7048 else
7049 active_one = priv->qos_data.def_qos_parm_OFDM;
7050 }
7051 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7052 } else {
7053 unsigned long flags;
7054 int active;
7055
7056 spin_lock_irqsave(&priv->ieee->lock, flags);
7057 active_one = &(qos_network_data->parameters);
7058 qos_network_data->old_param_count =
7059 qos_network_data->param_count;
7060 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7061 active = qos_network_data->supported;
7062 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7063
7064 if (active == 0) {
7065 burst_duration = ipw_qos_get_burst_duration(priv);
7066 for (i = 0; i < QOS_QUEUE_NUM; i++)
7067 qos_parameters[QOS_PARAM_SET_ACTIVE].
7068 tx_op_limit[i] = cpu_to_le16(burst_duration);
7069 }
7070 }
7071
7072 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7073 err = ipw_send_qos_params_command(priv,
7074 (struct libipw_qos_parameters *)
7075 &(qos_parameters[0]));
7076 if (err)
7077 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7078
7079 return err;
7080 }
7081
7082 /*
7083 * send IPW_CMD_WME_INFO to the firmware
7084 */
7085 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7086 {
7087 int ret = 0;
7088 struct libipw_qos_information_element qos_info;
7089
7090 if (priv == NULL)
7091 return -1;
7092
7093 qos_info.elementID = QOS_ELEMENT_ID;
7094 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7095
7096 qos_info.version = QOS_VERSION_1;
7097 qos_info.ac_info = 0;
7098
7099 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7100 qos_info.qui_type = QOS_OUI_TYPE;
7101 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7102
7103 ret = ipw_send_qos_info_command(priv, &qos_info);
7104 if (ret != 0) {
7105 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7106 }
7107 return ret;
7108 }
7109
7110 /*
7111 * Set the QoS parameter with the association request structure
7112 */
7113 static int ipw_qos_association(struct ipw_priv *priv,
7114 struct libipw_network *network)
7115 {
7116 int err = 0;
7117 struct libipw_qos_data *qos_data = NULL;
7118 struct libipw_qos_data ibss_data = {
7119 .supported = 1,
7120 .active = 1,
7121 };
7122
7123 switch (priv->ieee->iw_mode) {
7124 case IW_MODE_ADHOC:
7125 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7126
7127 qos_data = &ibss_data;
7128 break;
7129
7130 case IW_MODE_INFRA:
7131 qos_data = &network->qos_data;
7132 break;
7133
7134 default:
7135 BUG();
7136 break;
7137 }
7138
7139 err = ipw_qos_activate(priv, qos_data);
7140 if (err) {
7141 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7142 return err;
7143 }
7144
7145 if (priv->qos_data.qos_enable && qos_data->supported) {
7146 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7147 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7148 return ipw_qos_set_info_element(priv);
7149 }
7150
7151 return 0;
7152 }
7153
7154 /*
7155 * Handle beacon responses: if the QoS settings advertised by the
7156 * network differ from those recorded at association time, adjust
7157 * the QoS settings accordingly.
7158 */
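/*
 * A changed parameter set is detected by comparing the parameter set count
 * cached at the last update (old_param_count) with the count carried in
 * the latest frame; only then is the qos_activate work scheduled, so
 * frames that repeat the same parameters do not trigger firmware commands.
 */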
7159 static int ipw_qos_association_resp(struct ipw_priv *priv,
7160 struct libipw_network *network)
7161 {
7162 int ret = 0;
7163 unsigned long flags;
7164 u32 size = sizeof(struct libipw_qos_parameters);
7165 int set_qos_param = 0;
7166
7167 if ((priv == NULL) || (network == NULL) ||
7168 (priv->assoc_network == NULL))
7169 return ret;
7170
7171 if (!(priv->status & STATUS_ASSOCIATED))
7172 return ret;
7173
7174 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7175 return ret;
7176
7177 spin_lock_irqsave(&priv->ieee->lock, flags);
7178 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7179 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7180 sizeof(struct libipw_qos_data));
7181 priv->assoc_network->qos_data.active = 1;
7182 if ((network->qos_data.old_param_count !=
7183 network->qos_data.param_count)) {
7184 set_qos_param = 1;
7185 network->qos_data.old_param_count =
7186 network->qos_data.param_count;
7187 }
7188
7189 } else {
7190 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7191 memcpy(&priv->assoc_network->qos_data.parameters,
7192 &def_parameters_CCK, size);
7193 else
7194 memcpy(&priv->assoc_network->qos_data.parameters,
7195 &def_parameters_OFDM, size);
7196 priv->assoc_network->qos_data.active = 0;
7197 priv->assoc_network->qos_data.supported = 0;
7198 set_qos_param = 1;
7199 }
7200
7201 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7202
7203 if (set_qos_param == 1)
7204 schedule_work(&priv->qos_activate);
7205
7206 return ret;
7207 }
7208
7209 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7210 {
7211 u32 ret = 0;
7212
7213 if ((priv == NULL))
7214 return 0;
7215
7216 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7217 ret = priv->qos_data.burst_duration_CCK;
7218 else
7219 ret = priv->qos_data.burst_duration_OFDM;
7220
7221 return ret;
7222 }
7223
7224 /*
7225 * Initialize the global QoS settings
7226 */
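/*
 * When QoS is enabled the def_qos_parameters_CCK/OFDM tables are installed
 * as the per-mode defaults; otherwise the fixed def_parameters_CCK/OFDM
 * sets are used.  Burst durations are zeroed when bursting is disabled so
 * no TXOP burst is programmed.
 */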
7227 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7228 int burst_enable, u32 burst_duration_CCK,
7229 u32 burst_duration_OFDM)
7230 {
7231 priv->qos_data.qos_enable = enable;
7232
7233 if (priv->qos_data.qos_enable) {
7234 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7235 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7236 IPW_DEBUG_QOS("QoS is enabled\n");
7237 } else {
7238 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7239 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7240 IPW_DEBUG_QOS("QoS is not enabled\n");
7241 }
7242
7243 priv->qos_data.burst_enable = burst_enable;
7244
7245 if (burst_enable) {
7246 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7247 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7248 } else {
7249 priv->qos_data.burst_duration_CCK = 0;
7250 priv->qos_data.burst_duration_OFDM = 0;
7251 }
7252 }
7253
7254 /*
7255 * map the packet priority to the right TX Queue
7256 */
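/*
 * 802.1d priorities 0-7 index the 1-based from_priority_to_tx_queue[]
 * table and the result is converted to a 0-based firmware queue index.
 * Out-of-range priorities, or any priority while QoS is disabled, fall
 * back to priority 0.
 */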
7257 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7258 {
7259 if (priority > 7 || !priv->qos_data.qos_enable)
7260 priority = 0;
7261
7262 return from_priority_to_tx_queue[priority] - 1;
7263 }
7264
7265 static int ipw_is_qos_active(struct net_device *dev,
7266 struct sk_buff *skb)
7267 {
7268 struct ipw_priv *priv = libipw_priv(dev);
7269 struct libipw_qos_data *qos_data = NULL;
7270 int active, supported;
7271 u8 *daddr = skb->data + ETH_ALEN;
7272 int unicast = !is_multicast_ether_addr(daddr);
7273
7274 if (!(priv->status & STATUS_ASSOCIATED))
7275 return 0;
7276
7277 qos_data = &priv->assoc_network->qos_data;
7278
7279 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7280 if (unicast == 0)
7281 qos_data->active = 0;
7282 else
7283 qos_data->active = qos_data->supported;
7284 }
7285 active = qos_data->active;
7286 supported = qos_data->supported;
7287 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7288 "unicast %d\n",
7289 priv->qos_data.qos_enable, active, supported, unicast);
7290 if (active && priv->qos_data.qos_enable)
7291 return 1;
7292
7293 return 0;
7294
7295 }
7296 /*
7297 * add QoS parameter to the TX command
7298 */
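/*
 * Besides flagging the TFD as a QoS frame, the per-queue no-ack mask lets
 * individual queues be sent without acknowledgement: the ACK-required flag
 * is cleared and the QoS control field of the 802.11 header is marked
 * with CTRL_QOS_NO_ACK.
 */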
7299 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7300 u16 priority,
7301 struct tfd_data *tfd)
7302 {
7303 int tx_queue_id = 0;
7304
7305
7306 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7307 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7308
7309 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7310 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7311 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7312 }
7313 return 0;
7314 }
7315
7316 /*
7317 * background support to run QoS activate functionality
7318 */
7319 static void ipw_bg_qos_activate(struct work_struct *work)
7320 {
7321 struct ipw_priv *priv =
7322 container_of(work, struct ipw_priv, qos_activate);
7323
7324 mutex_lock(&priv->mutex);
7325
7326 if (priv->status & STATUS_ASSOCIATED)
7327 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7328
7329 mutex_unlock(&priv->mutex);
7330 }
7331
7332 static int ipw_handle_probe_response(struct net_device *dev,
7333 struct libipw_probe_response *resp,
7334 struct libipw_network *network)
7335 {
7336 struct ipw_priv *priv = libipw_priv(dev);
7337 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7338 (network == priv->assoc_network));
7339
7340 ipw_qos_handle_probe_response(priv, active_network, network);
7341
7342 return 0;
7343 }
7344
7345 static int ipw_handle_beacon(struct net_device *dev,
7346 struct libipw_beacon *resp,
7347 struct libipw_network *network)
7348 {
7349 struct ipw_priv *priv = libipw_priv(dev);
7350 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7351 (network == priv->assoc_network));
7352
7353 ipw_qos_handle_probe_response(priv, active_network, network);
7354
7355 return 0;
7356 }
7357
7358 static int ipw_handle_assoc_response(struct net_device *dev,
7359 struct libipw_assoc_response *resp,
7360 struct libipw_network *network)
7361 {
7362 struct ipw_priv *priv = libipw_priv(dev);
7363 ipw_qos_association_resp(priv, network);
7364 return 0;
7365 }
7366
7367 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7368 *qos_param)
7369 {
7370 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7371 sizeof(*qos_param) * 3, qos_param);
7372 }
7373
7374 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7375 *qos_param)
7376 {
7377 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7378 qos_param);
7379 }
7380
7381 #endif /* CONFIG_IPW2200_QOS */
7382
7383 static int ipw_associate_network(struct ipw_priv *priv,
7384 struct libipw_network *network,
7385 struct ipw_supported_rates *rates, int roaming)
7386 {
7387 int err;
7388 DECLARE_SSID_BUF(ssid);
7389
7390 if (priv->config & CFG_FIXED_RATE)
7391 ipw_set_fixed_rate(priv, network->mode);
7392
7393 if (!(priv->config & CFG_STATIC_ESSID)) {
7394 priv->essid_len = min(network->ssid_len,
7395 (u8) IW_ESSID_MAX_SIZE);
7396 memcpy(priv->essid, network->ssid, priv->essid_len);
7397 }
7398
7399 network->last_associate = jiffies;
7400
7401 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7402 priv->assoc_request.channel = network->channel;
7403 priv->assoc_request.auth_key = 0;
7404
7405 if ((priv->capability & CAP_PRIVACY_ON) &&
7406 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7407 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7408 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7409
7410 if (priv->ieee->sec.level == SEC_LEVEL_1)
7411 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7412
7413 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7414 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7415 priv->assoc_request.auth_type = AUTH_LEAP;
7416 else
7417 priv->assoc_request.auth_type = AUTH_OPEN;
7418
7419 if (priv->ieee->wpa_ie_len) {
7420 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7421 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7422 priv->ieee->wpa_ie_len);
7423 }
7424
7425 /*
7426 * It is valid for our ieee device to support multiple modes, but
7427 * when it comes to associating to a given network we have to choose
7428 * just one mode.
7429 */
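/* Preference order is A, then G, then B: the first mode supported by
 * both the hardware and the target network wins. */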
7430 if (network->mode & priv->ieee->mode & IEEE_A)
7431 priv->assoc_request.ieee_mode = IPW_A_MODE;
7432 else if (network->mode & priv->ieee->mode & IEEE_G)
7433 priv->assoc_request.ieee_mode = IPW_G_MODE;
7434 else if (network->mode & priv->ieee->mode & IEEE_B)
7435 priv->assoc_request.ieee_mode = IPW_B_MODE;
7436
7437 priv->assoc_request.capability = cpu_to_le16(network->capability);
7438 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7439 && !(priv->config & CFG_PREAMBLE_LONG)) {
7440 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7441 } else {
7442 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7443
7444 /* Clear the short preamble if we won't be supporting it */
7445 priv->assoc_request.capability &=
7446 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7447 }
7448
7449 /* Clear capability bits that aren't used in Ad Hoc */
7450 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7451 priv->assoc_request.capability &=
7452 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7453
7454 IPW_DEBUG_ASSOC("%ssociation attempt: '%s', channel %d, "
7455 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7456 roaming ? "Rea" : "A",
7457 print_ssid(ssid, priv->essid, priv->essid_len),
7458 network->channel,
7459 ipw_modes[priv->assoc_request.ieee_mode],
7460 rates->num_rates,
7461 (priv->assoc_request.preamble_length ==
7462 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7463 network->capability &
7464 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7465 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7466 priv->capability & CAP_PRIVACY_ON ?
7467 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7468 "(open)") : "",
7469 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7470 priv->capability & CAP_PRIVACY_ON ?
7471 '1' + priv->ieee->sec.active_key : '.',
7472 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7473
7474 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7475 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7476 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7477 priv->assoc_request.assoc_type = HC_IBSS_START;
7478 priv->assoc_request.assoc_tsf_msw = 0;
7479 priv->assoc_request.assoc_tsf_lsw = 0;
7480 } else {
7481 if (unlikely(roaming))
7482 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7483 else
7484 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7485 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7486 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7487 }
7488
7489 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7490
7491 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7492 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7493 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7494 } else {
7495 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7496 priv->assoc_request.atim_window = 0;
7497 }
7498
7499 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7500
7501 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7502 if (err) {
7503 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7504 return err;
7505 }
7506
7507 rates->ieee_mode = priv->assoc_request.ieee_mode;
7508 rates->purpose = IPW_RATE_CONNECT;
7509 ipw_send_supported_rates(priv, rates);
7510
7511 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7512 priv->sys_config.dot11g_auto_detection = 1;
7513 else
7514 priv->sys_config.dot11g_auto_detection = 0;
7515
7516 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7517 priv->sys_config.answer_broadcast_ssid_probe = 1;
7518 else
7519 priv->sys_config.answer_broadcast_ssid_probe = 0;
7520
7521 err = ipw_send_system_config(priv);
7522 if (err) {
7523 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7524 return err;
7525 }
7526
7527 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7528 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7529 if (err) {
7530 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7531 return err;
7532 }
7533
7534 /*
7535 * If preemption is enabled, it is possible for the association
7536 * to complete before we return from ipw_send_associate. Therefore
7537 * we have to be sure to update our private data first.
7538 */
7539 priv->channel = network->channel;
7540 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7541 priv->status |= STATUS_ASSOCIATING;
7542 priv->status &= ~STATUS_SECURITY_UPDATED;
7543
7544 priv->assoc_network = network;
7545
7546 #ifdef CONFIG_IPW2200_QOS
7547 ipw_qos_association(priv, network);
7548 #endif
7549
7550 err = ipw_send_associate(priv, &priv->assoc_request);
7551 if (err) {
7552 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7553 return err;
7554 }
7555
7556 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7557 print_ssid(ssid, priv->essid, priv->essid_len),
7558 priv->bssid);
7559
7560 return 0;
7561 }
7562
7563 static void ipw_roam(void *data)
7564 {
7565 struct ipw_priv *priv = data;
7566 struct libipw_network *network = NULL;
7567 struct ipw_network_match match = {
7568 .network = priv->assoc_network
7569 };
7570
7571 /* The roaming process is as follows:
7572 *
7573 * 1. Missed beacon threshold triggers the roaming process by
7574 * setting the status ROAM bit and requesting a scan.
7575 * 2. When the scan completes, it schedules the ROAM work
7576 * 3. The ROAM work looks at all of the known networks for one that
7577 * is a better network than the one currently associated with. If
7578 * none is found, the ROAM process is over (ROAM bit cleared)
7579 * 4. If a better network is found, a disassociation request is
7580 * sent.
7581 * 5. When the disassociation completes, the roam work is again
7582 * scheduled. The second time through, the driver is no longer
7583 * associated, and the newly selected network is sent an
7584 * association request.
7585 * 6. At this point, the roaming process is complete and the ROAM
7586 * status bit is cleared.
7587 */
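/* Note that ipw_roam() therefore runs twice per roam: once while still
 * associated (steps 3-4 above) and once after the disassociation
 * completes (step 5), distinguished by the STATUS_ASSOCIATED bit. */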
7588
7589 /* If we are no longer associated, and the roaming bit is no longer
7590 * set, then we are not actively roaming, so just return */
7591 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7592 return;
7593
7594 if (priv->status & STATUS_ASSOCIATED) {
7595 /* First pass through ROAM process -- look for a better
7596 * network */
7597 unsigned long flags;
7598 u8 rssi = priv->assoc_network->stats.rssi;
7599 priv->assoc_network->stats.rssi = -128;
7600 spin_lock_irqsave(&priv->ieee->lock, flags);
7601 list_for_each_entry(network, &priv->ieee->network_list, list) {
7602 if (network != priv->assoc_network)
7603 ipw_best_network(priv, &match, network, 1);
7604 }
7605 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7606 priv->assoc_network->stats.rssi = rssi;
7607
7608 if (match.network == priv->assoc_network) {
7609 IPW_DEBUG_ASSOC("No better APs in this network to "
7610 "roam to.\n");
7611 priv->status &= ~STATUS_ROAMING;
7612 ipw_debug_config(priv);
7613 return;
7614 }
7615
7616 ipw_send_disassociate(priv, 1);
7617 priv->assoc_network = match.network;
7618
7619 return;
7620 }
7621
7622 /* Second pass through ROAM process -- request association */
7623 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7624 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7625 priv->status &= ~STATUS_ROAMING;
7626 }
7627
7628 static void ipw_bg_roam(struct work_struct *work)
7629 {
7630 struct ipw_priv *priv =
7631 container_of(work, struct ipw_priv, roam);
7632 mutex_lock(&priv->mutex);
7633 ipw_roam(priv);
7634 mutex_unlock(&priv->mutex);
7635 }
7636
7637 static int ipw_associate(void *data)
7638 {
7639 struct ipw_priv *priv = data;
7640
7641 struct libipw_network *network = NULL;
7642 struct ipw_network_match match = {
7643 .network = NULL
7644 };
7645 struct ipw_supported_rates *rates;
7646 struct list_head *element;
7647 unsigned long flags;
7648 DECLARE_SSID_BUF(ssid);
7649
7650 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7651 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7652 return 0;
7653 }
7654
7655 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7656 IPW_DEBUG_ASSOC("Not attempting association (already in "
7657 "progress)\n");
7658 return 0;
7659 }
7660
7661 if (priv->status & STATUS_DISASSOCIATING) {
7662 IPW_DEBUG_ASSOC("Not attempting association (in "
7663 "disassociating)\n ");
7664 schedule_work(&priv->associate);
7665 return 0;
7666 }
7667
7668 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7669 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7670 "initialized)\n");
7671 return 0;
7672 }
7673
7674 if (!(priv->config & CFG_ASSOCIATE) &&
7675 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7676 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7677 return 0;
7678 }
7679
7680 /* Protect our use of the network_list */
7681 spin_lock_irqsave(&priv->ieee->lock, flags);
7682 list_for_each_entry(network, &priv->ieee->network_list, list)
7683 ipw_best_network(priv, &match, network, 0);
7684
7685 network = match.network;
7686 rates = &match.rates;
7687
7688 if (network == NULL &&
7689 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7690 priv->config & CFG_ADHOC_CREATE &&
7691 priv->config & CFG_STATIC_ESSID &&
7692 priv->config & CFG_STATIC_CHANNEL) {
7693 /* Use oldest network if the free list is empty */
7694 if (list_empty(&priv->ieee->network_free_list)) {
7695 struct libipw_network *oldest = NULL;
7696 struct libipw_network *target;
7697
7698 list_for_each_entry(target, &priv->ieee->network_list, list) {
7699 if ((oldest == NULL) ||
7700 (target->last_scanned < oldest->last_scanned))
7701 oldest = target;
7702 }
7703
7704 /* If there are no more slots, expire the oldest */
7705 list_del(&oldest->list);
7706 target = oldest;
7707 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7708 "network list.\n",
7709 print_ssid(ssid, target->ssid,
7710 target->ssid_len),
7711 target->bssid);
7712 list_add_tail(&target->list,
7713 &priv->ieee->network_free_list);
7714 }
7715
7716 element = priv->ieee->network_free_list.next;
7717 network = list_entry(element, struct libipw_network, list);
7718 ipw_adhoc_create(priv, network);
7719 rates = &priv->rates;
7720 list_del(element);
7721 list_add_tail(&network->list, &priv->ieee->network_list);
7722 }
7723 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7724
7725 /* If we reached the end of the list, then we don't have any valid
7726 * matching APs */
7727 if (!network) {
7728 ipw_debug_config(priv);
7729
7730 if (!(priv->status & STATUS_SCANNING)) {
7731 if (!(priv->config & CFG_SPEED_SCAN))
7732 schedule_delayed_work(&priv->request_scan,
7733 SCAN_INTERVAL);
7734 else
7735 schedule_delayed_work(&priv->request_scan, 0);
7736 }
7737
7738 return 0;
7739 }
7740
7741 ipw_associate_network(priv, network, rates, 0);
7742
7743 return 1;
7744 }
7745
7746 static void ipw_bg_associate(struct work_struct *work)
7747 {
7748 struct ipw_priv *priv =
7749 container_of(work, struct ipw_priv, associate);
7750 mutex_lock(&priv->mutex);
7751 ipw_associate(priv);
7752 mutex_unlock(&priv->mutex);
7753 }
7754
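/*
 * After hardware decryption the frame still carries the protected bit and
 * the crypto overhead.  Strip them so the stack sees a plain frame: clear
 * IEEE80211_FCTL_PROTECTED, then remove the 8-byte CCMP header and 8-byte
 * MIC for SEC_LEVEL_3, or the 4-byte IV and 4-byte ICV for SEC_LEVEL_1.
 */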
7755 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7756 struct sk_buff *skb)
7757 {
7758 struct ieee80211_hdr *hdr;
7759 u16 fc;
7760
7761 hdr = (struct ieee80211_hdr *)skb->data;
7762 fc = le16_to_cpu(hdr->frame_control);
7763 if (!(fc & IEEE80211_FCTL_PROTECTED))
7764 return;
7765
7766 fc &= ~IEEE80211_FCTL_PROTECTED;
7767 hdr->frame_control = cpu_to_le16(fc);
7768 switch (priv->ieee->sec.level) {
7769 case SEC_LEVEL_3:
7770 /* Remove CCMP HDR */
7771 memmove(skb->data + LIBIPW_3ADDR_LEN,
7772 skb->data + LIBIPW_3ADDR_LEN + 8,
7773 skb->len - LIBIPW_3ADDR_LEN - 8);
7774 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7775 break;
7776 case SEC_LEVEL_2:
7777 break;
7778 case SEC_LEVEL_1:
7779 /* Remove IV */
7780 memmove(skb->data + LIBIPW_3ADDR_LEN,
7781 skb->data + LIBIPW_3ADDR_LEN + 4,
7782 skb->len - LIBIPW_3ADDR_LEN - 4);
7783 skb_trim(skb, skb->len - 8); /* IV + ICV */
7784 break;
7785 case SEC_LEVEL_0:
7786 break;
7787 default:
7788 printk(KERN_ERR "Unknown security level %d\n",
7789 priv->ieee->sec.level);
7790 break;
7791 }
7792 }
7793
7794 static void ipw_handle_data_packet(struct ipw_priv *priv,
7795 struct ipw_rx_mem_buffer *rxb,
7796 struct libipw_rx_stats *stats)
7797 {
7798 struct net_device *dev = priv->net_dev;
7799 struct libipw_hdr_4addr *hdr;
7800 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7801
7802 /* We received data from the HW, so stop the watchdog */
7803 dev->trans_start = jiffies;
7804
7805 /* We only process data packets if the
7806 * interface is open */
7807 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7808 skb_tailroom(rxb->skb))) {
7809 dev->stats.rx_errors++;
7810 priv->wstats.discard.misc++;
7811 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7812 return;
7813 } else if (unlikely(!netif_running(priv->net_dev))) {
7814 dev->stats.rx_dropped++;
7815 priv->wstats.discard.misc++;
7816 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7817 return;
7818 }
7819
7820 /* Advance skb->data to the start of the actual payload */
7821 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7822
7823 /* Set the size of the skb to the size of the frame */
7824 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7825
7826 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7827
7828 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7829 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7830 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7831 (is_multicast_ether_addr(hdr->addr1) ?
7832 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7833 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7834
7835 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7836 dev->stats.rx_errors++;
7837 else { /* libipw_rx succeeded, so it now owns the SKB */
7838 rxb->skb = NULL;
7839 __ipw_led_activity_on(priv);
7840 }
7841 }
7842
7843 #ifdef CONFIG_IPW2200_RADIOTAP
7844 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7845 struct ipw_rx_mem_buffer *rxb,
7846 struct libipw_rx_stats *stats)
7847 {
7848 struct net_device *dev = priv->net_dev;
7849 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7850 struct ipw_rx_frame *frame = &pkt->u.frame;
7851
7852 /* initial pull of some data */
7853 u16 received_channel = frame->received_channel;
7854 u8 antennaAndPhy = frame->antennaAndPhy;
7855 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7856 u16 pktrate = frame->rate;
7857
7858 /* Magic struct that slots into the radiotap header -- no reason
7859 * to build this manually element by element, we can write it much
7860 * more efficiently than we can parse it. ORDER MATTERS HERE */
7861 struct ipw_rt_hdr *ipw_rt;
7862
7863 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7864
7865 /* We received data from the HW, so stop the watchdog */
7866 dev->trans_start = jiffies;
7867
7868 /* We only process data packets if the
7869 * interface is open */
7870 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7871 skb_tailroom(rxb->skb))) {
7872 dev->stats.rx_errors++;
7873 priv->wstats.discard.misc++;
7874 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7875 return;
7876 } else if (unlikely(!netif_running(priv->net_dev))) {
7877 dev->stats.rx_dropped++;
7878 priv->wstats.discard.misc++;
7879 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7880 return;
7881 }
7882
7883 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7884 * that now */
7885 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7886 /* FIXME: Should alloc bigger skb instead */
7887 dev->stats.rx_dropped++;
7888 priv->wstats.discard.misc++;
7889 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7890 return;
7891 }
7892
7893 /* copy the frame itself */
7894 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7895 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7896
7897 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7898
7899 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7900 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7901 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7902
7903 /* Big bitfield of all the fields we provide in radiotap */
7904 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7905 (1 << IEEE80211_RADIOTAP_TSFT) |
7906 (1 << IEEE80211_RADIOTAP_FLAGS) |
7907 (1 << IEEE80211_RADIOTAP_RATE) |
7908 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7909 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7910 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7911 (1 << IEEE80211_RADIOTAP_ANTENNA));
7912
7913 /* Zero the flags, we'll add to them as we go */
7914 ipw_rt->rt_flags = 0;
7915 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7916 frame->parent_tsf[2] << 16 |
7917 frame->parent_tsf[1] << 8 |
7918 frame->parent_tsf[0]);
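/* parent_tsf is delivered LSB first; only the low 32 bits of the TSF
 * are reassembled here into the 64-bit radiotap TSFT field. */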
7919
7920 /* Convert signal to DBM */
7921 ipw_rt->rt_dbmsignal = antsignal;
7922 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7923
7924 /* Convert the channel data and set the flags */
7925 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7926 if (received_channel > 14) { /* 802.11a */
7927 ipw_rt->rt_chbitmask =
7928 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7929 } else if (antennaAndPhy & 32) { /* 802.11b */
7930 ipw_rt->rt_chbitmask =
7931 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7932 } else { /* 802.11g */
7933 ipw_rt->rt_chbitmask =
7934 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7935 }
7936
7937 /* set the rate in multiples of 500k/s */
7938 switch (pktrate) {
7939 case IPW_TX_RATE_1MB:
7940 ipw_rt->rt_rate = 2;
7941 break;
7942 case IPW_TX_RATE_2MB:
7943 ipw_rt->rt_rate = 4;
7944 break;
7945 case IPW_TX_RATE_5MB:
7946 ipw_rt->rt_rate = 10;
7947 break;
7948 case IPW_TX_RATE_6MB:
7949 ipw_rt->rt_rate = 12;
7950 break;
7951 case IPW_TX_RATE_9MB:
7952 ipw_rt->rt_rate = 18;
7953 break;
7954 case IPW_TX_RATE_11MB:
7955 ipw_rt->rt_rate = 22;
7956 break;
7957 case IPW_TX_RATE_12MB:
7958 ipw_rt->rt_rate = 24;
7959 break;
7960 case IPW_TX_RATE_18MB:
7961 ipw_rt->rt_rate = 36;
7962 break;
7963 case IPW_TX_RATE_24MB:
7964 ipw_rt->rt_rate = 48;
7965 break;
7966 case IPW_TX_RATE_36MB:
7967 ipw_rt->rt_rate = 72;
7968 break;
7969 case IPW_TX_RATE_48MB:
7970 ipw_rt->rt_rate = 96;
7971 break;
7972 case IPW_TX_RATE_54MB:
7973 ipw_rt->rt_rate = 108;
7974 break;
7975 default:
7976 ipw_rt->rt_rate = 0;
7977 break;
7978 }
7979
7980 /* antenna number */
7981 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7982
7983 /* set the preamble flag if we have it */
7984 if ((antennaAndPhy & 64))
7985 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7986
7987 /* Set the size of the skb to the size of the frame */
7988 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7989
7990 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7991
7992 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7993 dev->stats.rx_errors++;
7994 else { /* libipw_rx succeeded, so it now owns the SKB */
7995 rxb->skb = NULL;
7996 /* no LED during capture */
7997 }
7998 }
7999 #endif
8000
8001 #ifdef CONFIG_IPW2200_PROMISCUOUS
8002 #define libipw_is_probe_response(fc) \
8003 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8004 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
8005
8006 #define libipw_is_management(fc) \
8007 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8008
8009 #define libipw_is_control(fc) \
8010 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8011
8012 #define libipw_is_data(fc) \
8013 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8014
8015 #define libipw_is_assoc_request(fc) \
8016 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8017
8018 #define libipw_is_reassoc_request(fc) \
8019 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8020
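/*
 * These helpers only inspect the frame-control type/subtype bits.  The
 * management/control/data checks drive the promiscuous interface's
 * per-class filter below (drop the class entirely or pass header only).
 */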
8021 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8022 struct ipw_rx_mem_buffer *rxb,
8023 struct libipw_rx_stats *stats)
8024 {
8025 struct net_device *dev = priv->prom_net_dev;
8026 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8027 struct ipw_rx_frame *frame = &pkt->u.frame;
8028 struct ipw_rt_hdr *ipw_rt;
8029
8030 /* First cache any information we need before we overwrite
8031 * the information provided in the skb from the hardware */
8032 struct ieee80211_hdr *hdr;
8033 u16 channel = frame->received_channel;
8034 u8 phy_flags = frame->antennaAndPhy;
8035 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8036 s8 noise = (s8) le16_to_cpu(frame->noise);
8037 u8 rate = frame->rate;
8038 unsigned short len = le16_to_cpu(pkt->u.frame.length);
8039 struct sk_buff *skb;
8040 int hdr_only = 0;
8041 u16 filter = priv->prom_priv->filter;
8042
8043 /* If the filter is set to not include Rx frames then return */
8044 if (filter & IPW_PROM_NO_RX)
8045 return;
8046
8047 /* We received data from the HW, so stop the watchdog */
8048 dev->trans_start = jiffies;
8049
8050 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8051 dev->stats.rx_errors++;
8052 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8053 return;
8054 }
8055
8056 /* We only process data packets if the interface is open */
8057 if (unlikely(!netif_running(dev))) {
8058 dev->stats.rx_dropped++;
8059 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8060 return;
8061 }
8062
8063 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8064 * that now */
8065 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8066 /* FIXME: Should alloc bigger skb instead */
8067 dev->stats.rx_dropped++;
8068 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8069 return;
8070 }
8071
8072 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8073 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8074 if (filter & IPW_PROM_NO_MGMT)
8075 return;
8076 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8077 hdr_only = 1;
8078 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8079 if (filter & IPW_PROM_NO_CTL)
8080 return;
8081 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8082 hdr_only = 1;
8083 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8084 if (filter & IPW_PROM_NO_DATA)
8085 return;
8086 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8087 hdr_only = 1;
8088 }
8089
8090 /* Copy the SKB since this is for the promiscuous side */
8091 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8092 if (skb == NULL) {
8093 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8094 return;
8095 }
8096
8097 /* copy the frame data to just after where the radiotap header will go */
8098 ipw_rt = (void *)skb->data;
8099
8100 if (hdr_only)
8101 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8102
8103 memcpy(ipw_rt->payload, hdr, len);
8104
8105 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8106 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8107 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8108
8109 /* Set the size of the skb to the size of the frame */
8110 skb_put(skb, sizeof(*ipw_rt) + len);
8111
8112 /* Big bitfield of all the fields we provide in radiotap */
8113 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8114 (1 << IEEE80211_RADIOTAP_TSFT) |
8115 (1 << IEEE80211_RADIOTAP_FLAGS) |
8116 (1 << IEEE80211_RADIOTAP_RATE) |
8117 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8118 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8119 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8120 (1 << IEEE80211_RADIOTAP_ANTENNA));
8121
8122 /* Zero the flags, we'll add to them as we go */
8123 ipw_rt->rt_flags = 0;
8124 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8125 frame->parent_tsf[2] << 16 |
8126 frame->parent_tsf[1] << 8 |
8127 frame->parent_tsf[0]);
8128
8129 /* Convert to DBM */
8130 ipw_rt->rt_dbmsignal = signal;
8131 ipw_rt->rt_dbmnoise = noise;
8132
8133 /* Convert the channel data and set the flags */
8134 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8135 if (channel > 14) { /* 802.11a */
8136 ipw_rt->rt_chbitmask =
8137 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8138 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8139 ipw_rt->rt_chbitmask =
8140 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8141 } else { /* 802.11g */
8142 ipw_rt->rt_chbitmask =
8143 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8144 }
8145
8146 /* set the rate in multiples of 500k/s */
8147 switch (rate) {
8148 case IPW_TX_RATE_1MB:
8149 ipw_rt->rt_rate = 2;
8150 break;
8151 case IPW_TX_RATE_2MB:
8152 ipw_rt->rt_rate = 4;
8153 break;
8154 case IPW_TX_RATE_5MB:
8155 ipw_rt->rt_rate = 10;
8156 break;
8157 case IPW_TX_RATE_6MB:
8158 ipw_rt->rt_rate = 12;
8159 break;
8160 case IPW_TX_RATE_9MB:
8161 ipw_rt->rt_rate = 18;
8162 break;
8163 case IPW_TX_RATE_11MB:
8164 ipw_rt->rt_rate = 22;
8165 break;
8166 case IPW_TX_RATE_12MB:
8167 ipw_rt->rt_rate = 24;
8168 break;
8169 case IPW_TX_RATE_18MB:
8170 ipw_rt->rt_rate = 36;
8171 break;
8172 case IPW_TX_RATE_24MB:
8173 ipw_rt->rt_rate = 48;
8174 break;
8175 case IPW_TX_RATE_36MB:
8176 ipw_rt->rt_rate = 72;
8177 break;
8178 case IPW_TX_RATE_48MB:
8179 ipw_rt->rt_rate = 96;
8180 break;
8181 case IPW_TX_RATE_54MB:
8182 ipw_rt->rt_rate = 108;
8183 break;
8184 default:
8185 ipw_rt->rt_rate = 0;
8186 break;
8187 }
8188
8189 /* antenna number */
8190 ipw_rt->rt_antenna = (phy_flags & 3);
8191
8192 /* set the preamble flag if we have it */
8193 if (phy_flags & (1 << 6))
8194 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8195
8196 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8197
8198 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8199 dev->stats.rx_errors++;
8200 dev_kfree_skb_any(skb);
8201 }
8202 }
8203 #endif
8204
8205 static int is_network_packet(struct ipw_priv *priv,
8206 struct libipw_hdr_4addr *header)
8207 {
8208 /* Filter incoming packets to determine if they are targeted toward
8209 * this network, discarding packets coming from ourselves */
8210 switch (priv->ieee->iw_mode) {
8211 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8212 /* packets from our adapter are dropped (echo) */
8213 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8214 return 0;
8215
8216 /* {broad,multi}cast packets to our BSSID go through */
8217 if (is_multicast_ether_addr(header->addr1))
8218 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8219
8220 /* packets to our adapter go through */
8221 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8222 ETH_ALEN);
8223
8224 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8225 /* packets from our adapter are dropped (echo) */
8226 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8227 return 0;
8228
8229 /* {broad,multi}cast packets to our BSS go through */
8230 if (is_multicast_ether_addr(header->addr1))
8231 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8232
8233 /* packets to our adapter go through */
8234 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8235 ETH_ALEN);
8236 }
8237
8238 return 1;
8239 }
8240
8241 #define IPW_PACKET_RETRY_TIME HZ
8242
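/*
 * Duplicate detection keeps the last sequence/fragment number seen per
 * transmitter: a small hash (keyed on the low MAC byte) of per-peer
 * entries in IBSS mode, or a single set of counters for the AP in
 * infrastructure mode.  A frame repeating the previous sequence number
 * within IPW_PACKET_RETRY_TIME (one second), or an out-of-order fragment,
 * is dropped.
 */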
8243 static int is_duplicate_packet(struct ipw_priv *priv,
8244 struct libipw_hdr_4addr *header)
8245 {
8246 u16 sc = le16_to_cpu(header->seq_ctl);
8247 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8248 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8249 u16 *last_seq, *last_frag;
8250 unsigned long *last_time;
8251
8252 switch (priv->ieee->iw_mode) {
8253 case IW_MODE_ADHOC:
8254 {
8255 struct list_head *p;
8256 struct ipw_ibss_seq *entry = NULL;
8257 u8 *mac = header->addr2;
8258 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8259
8260 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8261 entry =
8262 list_entry(p, struct ipw_ibss_seq, list);
8263 if (!memcmp(entry->mac, mac, ETH_ALEN))
8264 break;
8265 }
8266 if (p == &priv->ibss_mac_hash[index]) {
8267 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8268 if (!entry) {
8269 IPW_ERROR
8270 ("Cannot malloc new mac entry\n");
8271 return 0;
8272 }
8273 memcpy(entry->mac, mac, ETH_ALEN);
8274 entry->seq_num = seq;
8275 entry->frag_num = frag;
8276 entry->packet_time = jiffies;
8277 list_add(&entry->list,
8278 &priv->ibss_mac_hash[index]);
8279 return 0;
8280 }
8281 last_seq = &entry->seq_num;
8282 last_frag = &entry->frag_num;
8283 last_time = &entry->packet_time;
8284 break;
8285 }
8286 case IW_MODE_INFRA:
8287 last_seq = &priv->last_seq_num;
8288 last_frag = &priv->last_frag_num;
8289 last_time = &priv->last_packet_time;
8290 break;
8291 default:
8292 return 0;
8293 }
8294 if ((*last_seq == seq) &&
8295 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8296 if (*last_frag == frag)
8297 goto drop;
8298 if (*last_frag + 1 != frag)
8299 /* out-of-order fragment */
8300 goto drop;
8301 } else
8302 *last_seq = seq;
8303
8304 *last_frag = frag;
8305 *last_time = jiffies;
8306 return 0;
8307
8308 drop:
8309 /* Comment this line now since we observed the card receives
8310 * duplicate packets but the FCTL_RETRY bit is not set in the
8311 * IBSS mode with fragmentation enabled.
8312 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8313 return 1;
8314 }
8315
8316 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8317 struct ipw_rx_mem_buffer *rxb,
8318 struct libipw_rx_stats *stats)
8319 {
8320 struct sk_buff *skb = rxb->skb;
8321 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8322 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8323 (skb->data + IPW_RX_FRAME_SIZE);
8324
8325 libipw_rx_mgt(priv->ieee, header, stats);
8326
8327 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8328 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8329 IEEE80211_STYPE_PROBE_RESP) ||
8330 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8331 IEEE80211_STYPE_BEACON))) {
8332 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8333 ipw_add_station(priv, header->addr2);
8334 }
8335
8336 if (priv->config & CFG_NET_STATS) {
8337 IPW_DEBUG_HC("sending stat packet\n");
8338
8339 /* Set the size of the skb to the size of the full
8340 * ipw header and 802.11 frame */
8341 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8342 IPW_RX_FRAME_SIZE);
8343
8344 /* Advance past the ipw packet header to the 802.11 frame */
8345 skb_pull(skb, IPW_RX_FRAME_SIZE);
8346
8347 /* Push the libipw_rx_stats before the 802.11 frame */
8348 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8349
8350 skb->dev = priv->ieee->dev;
8351
8352 /* Point raw at the libipw_stats */
8353 skb_reset_mac_header(skb);
8354
8355 skb->pkt_type = PACKET_OTHERHOST;
8356 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8357 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8358 netif_rx(skb);
8359 rxb->skb = NULL;
8360 }
8361 }
8362
8363 /*
8364 * Main entry function for receiving a packet with 802.11 headers. This
8365 * should be called whenever the FW has notified us that there is a new
8366 * skb in the receive queue.
8367 */
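/*
 * The hardware read index tells us how far the firmware has filled the
 * circular Rx ring.  Each buffer is synced for CPU access, dispatched by
 * message type (802.11 frame vs. host notification), and then recycled
 * onto rx_used.  If more than half of the ring was free on entry, the
 * queue is also replenished inside the loop so the firmware never runs
 * out of Rx buffers.
 */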
8368 static void ipw_rx(struct ipw_priv *priv)
8369 {
8370 struct ipw_rx_mem_buffer *rxb;
8371 struct ipw_rx_packet *pkt;
8372 struct libipw_hdr_4addr *header;
8373 u32 r, w, i;
8374 u8 network_packet;
8375 u8 fill_rx = 0;
8376
8377 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8378 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8379 i = priv->rxq->read;
8380
8381 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8382 fill_rx = 1;
8383
8384 while (i != r) {
8385 rxb = priv->rxq->queue[i];
8386 if (unlikely(rxb == NULL)) {
8387 printk(KERN_CRIT "Queue not allocated!\n");
8388 break;
8389 }
8390 priv->rxq->queue[i] = NULL;
8391
8392 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8393 IPW_RX_BUF_SIZE,
8394 PCI_DMA_FROMDEVICE);
8395
8396 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8397 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8398 pkt->header.message_type,
8399 pkt->header.rx_seq_num, pkt->header.control_bits);
8400
8401 switch (pkt->header.message_type) {
8402 case RX_FRAME_TYPE: /* 802.11 frame */ {
8403 struct libipw_rx_stats stats = {
8404 .rssi = pkt->u.frame.rssi_dbm -
8405 IPW_RSSI_TO_DBM,
8406 .signal =
8407 pkt->u.frame.rssi_dbm -
8408 IPW_RSSI_TO_DBM + 0x100,
8409 .noise =
8410 le16_to_cpu(pkt->u.frame.noise),
8411 .rate = pkt->u.frame.rate,
8412 .mac_time = jiffies,
8413 .received_channel =
8414 pkt->u.frame.received_channel,
8415 .freq =
8416 (pkt->u.frame.
8417 control & (1 << 0)) ?
8418 LIBIPW_24GHZ_BAND :
8419 LIBIPW_52GHZ_BAND,
8420 .len = le16_to_cpu(pkt->u.frame.length),
8421 };
8422
8423 if (stats.rssi != 0)
8424 stats.mask |= LIBIPW_STATMASK_RSSI;
8425 if (stats.signal != 0)
8426 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8427 if (stats.noise != 0)
8428 stats.mask |= LIBIPW_STATMASK_NOISE;
8429 if (stats.rate != 0)
8430 stats.mask |= LIBIPW_STATMASK_RATE;
8431
8432 priv->rx_packets++;
8433
8434 #ifdef CONFIG_IPW2200_PROMISCUOUS
8435 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8436 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8437 #endif
8438
8439 #ifdef CONFIG_IPW2200_MONITOR
8440 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8441 #ifdef CONFIG_IPW2200_RADIOTAP
8442
8443 ipw_handle_data_packet_monitor(priv,
8444 rxb,
8445 &stats);
8446 #else
8447 ipw_handle_data_packet(priv, rxb,
8448 &stats);
8449 #endif
8450 break;
8451 }
8452 #endif
8453
8454 header =
8455 (struct libipw_hdr_4addr *)(rxb->skb->
8456 data +
8457 IPW_RX_FRAME_SIZE);
8458 /* TODO: Check Ad-Hoc dest/source and make sure
8459 * that we are actually parsing these packets
8460 * correctly -- we should probably use the
8461 * frame control of the packet and disregard
8462 * the current iw_mode */
8463
8464 network_packet =
8465 is_network_packet(priv, header);
8466 if (network_packet && priv->assoc_network) {
8467 priv->assoc_network->stats.rssi =
8468 stats.rssi;
8469 priv->exp_avg_rssi =
8470 exponential_average(priv->exp_avg_rssi,
8471 stats.rssi, DEPTH_RSSI);
8472 }
8473
8474 IPW_DEBUG_RX("Frame: len=%u\n",
8475 le16_to_cpu(pkt->u.frame.length));
8476
8477 if (le16_to_cpu(pkt->u.frame.length) <
8478 libipw_get_hdrlen(le16_to_cpu(
8479 header->frame_ctl))) {
8480 IPW_DEBUG_DROP
8481 ("Received packet is too small. "
8482 "Dropping.\n");
8483 priv->net_dev->stats.rx_errors++;
8484 priv->wstats.discard.misc++;
8485 break;
8486 }
8487
8488 switch (WLAN_FC_GET_TYPE
8489 (le16_to_cpu(header->frame_ctl))) {
8490
8491 case IEEE80211_FTYPE_MGMT:
8492 ipw_handle_mgmt_packet(priv, rxb,
8493 &stats);
8494 break;
8495
8496 case IEEE80211_FTYPE_CTL:
8497 break;
8498
8499 case IEEE80211_FTYPE_DATA:
8500 if (unlikely(!network_packet ||
8501 is_duplicate_packet(priv,
8502 header)))
8503 {
8504 IPW_DEBUG_DROP("Dropping: "
8505 "%pM, "
8506 "%pM, "
8507 "%pM\n",
8508 header->addr1,
8509 header->addr2,
8510 header->addr3);
8511 break;
8512 }
8513
8514 ipw_handle_data_packet(priv, rxb,
8515 &stats);
8516
8517 break;
8518 }
8519 break;
8520 }
8521
8522 case RX_HOST_NOTIFICATION_TYPE:{
8523 IPW_DEBUG_RX
8524 ("Notification: subtype=%02X flags=%02X size=%d\n",
8525 pkt->u.notification.subtype,
8526 pkt->u.notification.flags,
8527 le16_to_cpu(pkt->u.notification.size));
8528 ipw_rx_notification(priv, &pkt->u.notification);
8529 break;
8530 }
8531
8532 default:
8533 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8534 pkt->header.message_type);
8535 break;
8536 }
8537
8538 /* For now we just don't re-use anything. We can tweak this
8539 * later to try and re-use notification packets and SKBs that
8540 * fail to Rx correctly */
8541 if (rxb->skb != NULL) {
8542 dev_kfree_skb_any(rxb->skb);
8543 rxb->skb = NULL;
8544 }
8545
8546 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8547 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8548 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8549
8550 i = (i + 1) % RX_QUEUE_SIZE;
8551
8552 /* If there are a lot of unused frames, restock the Rx queue
8553 * so the ucode won't assert */
8554 if (fill_rx) {
8555 priv->rxq->read = i;
8556 ipw_rx_queue_replenish(priv);
8557 }
8558 }
8559
8560 /* Backtrack one entry */
8561 priv->rxq->read = i;
8562 ipw_rx_queue_restock(priv);
8563 }
8564
8565 #define DEFAULT_RTS_THRESHOLD 2304U
8566 #define MIN_RTS_THRESHOLD 1U
8567 #define MAX_RTS_THRESHOLD 2304U
8568 #define DEFAULT_BEACON_INTERVAL 100U
8569 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8570 #define DEFAULT_LONG_RETRY_LIMIT 4U
8571
8572 /**
8573 * ipw_sw_reset
8574 * @option: options to control different reset behaviour
8575 * 0 = reset everything except the 'disable' module_param
8576 * 1 = reset everything and print out driver info (for probe only)
8577 * 2 = reset everything
8578 */
8579 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8580 {
8581 int band, modulation;
8582 int old_mode = priv->ieee->iw_mode;
8583
8584 /* Initialize module parameter values here */
8585 priv->config = 0;
8586
8587 /* We default to disabling the LED code as right now it causes
8588 * too many systems to lock up... */
8589 if (!led_support)
8590 priv->config |= CFG_NO_LED;
8591
8592 if (associate)
8593 priv->config |= CFG_ASSOCIATE;
8594 else
8595 IPW_DEBUG_INFO("Auto associate disabled.\n");
8596
8597 if (auto_create)
8598 priv->config |= CFG_ADHOC_CREATE;
8599 else
8600 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8601
8602 priv->config &= ~CFG_STATIC_ESSID;
8603 priv->essid_len = 0;
8604 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8605
8606 if (disable && option) {
8607 priv->status |= STATUS_RF_KILL_SW;
8608 IPW_DEBUG_INFO("Radio disabled.\n");
8609 }
8610
8611 if (default_channel != 0) {
8612 priv->config |= CFG_STATIC_CHANNEL;
8613 priv->channel = default_channel;
8614 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8615 /* TODO: Validate that provided channel is in range */
8616 }
8617 #ifdef CONFIG_IPW2200_QOS
8618 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8619 burst_duration_CCK, burst_duration_OFDM);
8620 #endif /* CONFIG_IPW2200_QOS */
8621
8622 switch (network_mode) {
8623 case 1:
8624 priv->ieee->iw_mode = IW_MODE_ADHOC;
8625 priv->net_dev->type = ARPHRD_ETHER;
8626
8627 break;
8628 #ifdef CONFIG_IPW2200_MONITOR
8629 case 2:
8630 priv->ieee->iw_mode = IW_MODE_MONITOR;
8631 #ifdef CONFIG_IPW2200_RADIOTAP
8632 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8633 #else
8634 priv->net_dev->type = ARPHRD_IEEE80211;
8635 #endif
8636 break;
8637 #endif
8638 default:
8639 case 0:
8640 priv->net_dev->type = ARPHRD_ETHER;
8641 priv->ieee->iw_mode = IW_MODE_INFRA;
8642 break;
8643 }
8644
8645 if (hwcrypto) {
8646 priv->ieee->host_encrypt = 0;
8647 priv->ieee->host_encrypt_msdu = 0;
8648 priv->ieee->host_decrypt = 0;
8649 priv->ieee->host_mc_decrypt = 0;
8650 }
8651 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8652
8653 /* IPW2200/2915 is able to do hardware fragmentation. */
8654 priv->ieee->host_open_frag = 0;
8655
8656 if ((priv->pci_dev->device == 0x4223) ||
8657 (priv->pci_dev->device == 0x4224)) {
8658 if (option == 1)
8659 printk(KERN_INFO DRV_NAME
8660 ": Detected Intel PRO/Wireless 2915ABG Network "
8661 "Connection\n");
8662 priv->ieee->abg_true = 1;
8663 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8664 modulation = LIBIPW_OFDM_MODULATION |
8665 LIBIPW_CCK_MODULATION;
8666 priv->adapter = IPW_2915ABG;
8667 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8668 } else {
8669 if (option == 1)
8670 printk(KERN_INFO DRV_NAME
8671 ": Detected Intel PRO/Wireless 2200BG Network "
8672 "Connection\n");
8673
8674 priv->ieee->abg_true = 0;
8675 band = LIBIPW_24GHZ_BAND;
8676 modulation = LIBIPW_OFDM_MODULATION |
8677 LIBIPW_CCK_MODULATION;
8678 priv->adapter = IPW_2200BG;
8679 priv->ieee->mode = IEEE_G | IEEE_B;
8680 }
8681
8682 priv->ieee->freq_band = band;
8683 priv->ieee->modulation = modulation;
8684
8685 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8686
8687 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8688 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8689
8690 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8691 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8692 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8693
8694 /* If power management is turned on, default to AC mode */
8695 priv->power_mode = IPW_POWER_AC;
8696 priv->tx_power = IPW_TX_POWER_DEFAULT;
8697
8698 return old_mode == priv->ieee->iw_mode;
8699 }
8700
8701 /*
8702 * This file defines the Wireless Extension handlers. It does not
8703 * define any methods of hardware manipulation and relies on the
8704 * functions defined in ipw_main to provide the HW interaction.
8705 *
8706 * The exception to this is the use of the ipw_get_ordinal()
8707 * function used to poll the hardware vs. making unnecessary calls.
8708 *
8709 */
8710
8711 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8712 {
8713 if (channel == 0) {
8714 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8715 priv->config &= ~CFG_STATIC_CHANNEL;
8716 IPW_DEBUG_ASSOC("Attempting to associate with new "
8717 "parameters.\n");
8718 ipw_associate(priv);
8719 return 0;
8720 }
8721
8722 priv->config |= CFG_STATIC_CHANNEL;
8723
8724 if (priv->channel == channel) {
8725 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8726 channel);
8727 return 0;
8728 }
8729
8730 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8731 priv->channel = channel;
8732
8733 #ifdef CONFIG_IPW2200_MONITOR
8734 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8735 int i;
8736 if (priv->status & STATUS_SCANNING) {
8737 IPW_DEBUG_SCAN("Scan abort triggered due to "
8738 "channel change.\n");
8739 ipw_abort_scan(priv);
8740 }
8741
8742 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8743 udelay(10);
8744
8745 if (priv->status & STATUS_SCANNING)
8746 IPW_DEBUG_SCAN("Still scanning...\n");
8747 else
8748 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8749 1000 - i);
8750
8751 return 0;
8752 }
8753 #endif /* CONFIG_IPW2200_MONITOR */
8754
8755 /* Network configuration changed -- force [re]association */
8756 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8757 if (!ipw_disassociate(priv))
8758 ipw_associate(priv);
8759
8760 return 0;
8761 }
8762
8763 static int ipw_wx_set_freq(struct net_device *dev,
8764 struct iw_request_info *info,
8765 union iwreq_data *wrqu, char *extra)
8766 {
8767 struct ipw_priv *priv = libipw_priv(dev);
8768 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8769 struct iw_freq *fwrq = &wrqu->freq;
8770 int ret = 0, i;
8771 u8 channel, flags;
8772 int band;
8773
8774 if (fwrq->m == 0) {
8775 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8776 mutex_lock(&priv->mutex);
8777 ret = ipw_set_channel(priv, 0);
8778 mutex_unlock(&priv->mutex);
8779 return ret;
8780 }
8781 /* if setting by freq convert to channel */
8782 if (fwrq->e == 1) {
8783 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8784 if (channel == 0)
8785 return -EINVAL;
8786 } else
8787 channel = fwrq->m;
8788
8789 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8790 return -EINVAL;
8791
8792 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8793 i = libipw_channel_to_index(priv->ieee, channel);
8794 if (i == -1)
8795 return -EINVAL;
8796
8797 flags = (band == LIBIPW_24GHZ_BAND) ?
8798 geo->bg[i].flags : geo->a[i].flags;
8799 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8800 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8801 return -EINVAL;
8802 }
8803 }
8804
8805 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8806 mutex_lock(&priv->mutex);
8807 ret = ipw_set_channel(priv, channel);
8808 mutex_unlock(&priv->mutex);
8809 return ret;
8810 }
8811
8812 static int ipw_wx_get_freq(struct net_device *dev,
8813 struct iw_request_info *info,
8814 union iwreq_data *wrqu, char *extra)
8815 {
8816 struct ipw_priv *priv = libipw_priv(dev);
8817
8818 wrqu->freq.e = 0;
8819
8820 /* If we are associated, trying to associate, or have a statically
8821 * configured CHANNEL then return that; otherwise return ANY */
8822 mutex_lock(&priv->mutex);
8823 if (priv->config & CFG_STATIC_CHANNEL ||
8824 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8825 int i;
8826
8827 i = libipw_channel_to_index(priv->ieee, priv->channel);
8828 BUG_ON(i == -1);
8829 wrqu->freq.e = 1;
8830
8831 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8832 case LIBIPW_52GHZ_BAND:
8833 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8834 break;
8835
8836 case LIBIPW_24GHZ_BAND:
8837 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8838 break;
8839
8840 default:
8841 BUG();
8842 }
8843 } else
8844 wrqu->freq.m = 0;
8845
8846 mutex_unlock(&priv->mutex);
8847 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8848 return 0;
8849 }
8850
8851 static int ipw_wx_set_mode(struct net_device *dev,
8852 struct iw_request_info *info,
8853 union iwreq_data *wrqu, char *extra)
8854 {
8855 struct ipw_priv *priv = libipw_priv(dev);
8856 int err = 0;
8857
8858 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8859
8860 switch (wrqu->mode) {
8861 #ifdef CONFIG_IPW2200_MONITOR
8862 case IW_MODE_MONITOR:
8863 #endif
8864 case IW_MODE_ADHOC:
8865 case IW_MODE_INFRA:
8866 break;
8867 case IW_MODE_AUTO:
8868 wrqu->mode = IW_MODE_INFRA;
8869 break;
8870 default:
8871 return -EINVAL;
8872 }
8873 if (wrqu->mode == priv->ieee->iw_mode)
8874 return 0;
8875
8876 mutex_lock(&priv->mutex);
8877
8878 ipw_sw_reset(priv, 0);
8879
8880 #ifdef CONFIG_IPW2200_MONITOR
8881 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8882 priv->net_dev->type = ARPHRD_ETHER;
8883
8884 if (wrqu->mode == IW_MODE_MONITOR)
8885 #ifdef CONFIG_IPW2200_RADIOTAP
8886 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8887 #else
8888 priv->net_dev->type = ARPHRD_IEEE80211;
8889 #endif
8890 #endif /* CONFIG_IPW2200_MONITOR */
8891
8892 /* Free the existing firmware and reset the fw_loaded
8893 * flag so ipw_load() will bring in the new firmware */
8894 free_firmware();
8895
8896 priv->ieee->iw_mode = wrqu->mode;
8897
8898 schedule_work(&priv->adapter_restart);
8899 mutex_unlock(&priv->mutex);
8900 return err;
8901 }
8902
8903 static int ipw_wx_get_mode(struct net_device *dev,
8904 struct iw_request_info *info,
8905 union iwreq_data *wrqu, char *extra)
8906 {
8907 struct ipw_priv *priv = libipw_priv(dev);
8908 mutex_lock(&priv->mutex);
8909 wrqu->mode = priv->ieee->iw_mode;
8910 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8911 mutex_unlock(&priv->mutex);
8912 return 0;
8913 }
8914
8915 /* Values are in microseconds */
8916 static const s32 timeout_duration[] = {
8917 350000,
8918 250000,
8919 75000,
8920 37000,
8921 25000,
8922 };
8923
8924 static const s32 period_duration[] = {
8925 400000,
8926 700000,
8927 1000000,
8928 1000000,
8929 1000000
8930 };
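/* timeout_duration[] and period_duration[] are indexed by power-save
 * level - 1 and are reported back to userspace by ipw_wx_get_powermode()
 * below. */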
8931
8932 static int ipw_wx_get_range(struct net_device *dev,
8933 struct iw_request_info *info,
8934 union iwreq_data *wrqu, char *extra)
8935 {
8936 struct ipw_priv *priv = libipw_priv(dev);
8937 struct iw_range *range = (struct iw_range *)extra;
8938 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8939 int i = 0, j;
8940
8941 wrqu->data.length = sizeof(*range);
8942 memset(range, 0, sizeof(*range));
8943
8944 /* 54Mbs == ~27 Mb/s real (802.11g) */
8945 range->throughput = 27 * 1000 * 1000;
8946
8947 range->max_qual.qual = 100;
8948 /* TODO: Find real max RSSI and stick here */
8949 range->max_qual.level = 0;
8950 range->max_qual.noise = 0;
8951 range->max_qual.updated = 7; /* Updated all three */
8952
8953 range->avg_qual.qual = 70;
8954 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8955 range->avg_qual.level = 0; /* FIXME to real average level */
8956 range->avg_qual.noise = 0;
8957 range->avg_qual.updated = 7; /* Updated all three */
8958 mutex_lock(&priv->mutex);
8959 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8960
8961 for (i = 0; i < range->num_bitrates; i++)
8962 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8963 500000;
8964
8965 range->max_rts = DEFAULT_RTS_THRESHOLD;
8966 range->min_frag = MIN_FRAG_THRESHOLD;
8967 range->max_frag = MAX_FRAG_THRESHOLD;
8968
8969 range->encoding_size[0] = 5;
8970 range->encoding_size[1] = 13;
8971 range->num_encoding_sizes = 2;
8972 range->max_encoding_tokens = WEP_KEYS;
8973
8974 /* Set the Wireless Extension versions */
8975 range->we_version_compiled = WIRELESS_EXT;
8976 range->we_version_source = 18;
8977
8978 i = 0;
8979 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8980 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8981 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8982 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8983 continue;
8984
8985 range->freq[i].i = geo->bg[j].channel;
8986 range->freq[i].m = geo->bg[j].freq * 100000;
8987 range->freq[i].e = 1;
8988 i++;
8989 }
8990 }
8991
8992 if (priv->ieee->mode & IEEE_A) {
8993 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8994 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8995 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8996 continue;
8997
8998 range->freq[i].i = geo->a[j].channel;
8999 range->freq[i].m = geo->a[j].freq * 100000;
9000 range->freq[i].e = 1;
9001 i++;
9002 }
9003 }
9004
9005 range->num_channels = i;
9006 range->num_frequency = i;
9007
9008 mutex_unlock(&priv->mutex);
9009
9010 /* Event capability (kernel + driver) */
9011 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9012 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9013 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9014 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9015 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9016
9017 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9018 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9019
9020 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9021
9022 IPW_DEBUG_WX("GET Range\n");
9023 return 0;
9024 }
9025
9026 static int ipw_wx_set_wap(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *extra)
9029 {
9030 struct ipw_priv *priv = libipw_priv(dev);
9031
9032 static const unsigned char any[] = {
9033 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9034 };
9035 static const unsigned char off[] = {
9036 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9037 };
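/* An all-0xff or all-zero BSSID clears any mandatory BSSID, letting the
 * firmware pick the access point (see the memcmp checks below). */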
9038
9039 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9040 return -EINVAL;
9041 mutex_lock(&priv->mutex);
9042 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9043 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9044 /* we disable mandatory BSSID association */
9045 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9046 priv->config &= ~CFG_STATIC_BSSID;
9047 IPW_DEBUG_ASSOC("Attempting to associate with new "
9048 "parameters.\n");
9049 ipw_associate(priv);
9050 mutex_unlock(&priv->mutex);
9051 return 0;
9052 }
9053
9054 priv->config |= CFG_STATIC_BSSID;
9055 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9056 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9057 mutex_unlock(&priv->mutex);
9058 return 0;
9059 }
9060
9061 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9062 wrqu->ap_addr.sa_data);
9063
9064 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9065
9066 /* Network configuration changed -- force [re]association */
9067 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9068 if (!ipw_disassociate(priv))
9069 ipw_associate(priv);
9070
9071 mutex_unlock(&priv->mutex);
9072 return 0;
9073 }
9074
9075 static int ipw_wx_get_wap(struct net_device *dev,
9076 struct iw_request_info *info,
9077 union iwreq_data *wrqu, char *extra)
9078 {
9079 struct ipw_priv *priv = libipw_priv(dev);
9080
9081 /* If we are associated, trying to associate, or have a statically
9082 * configured BSSID then return that; otherwise return ANY */
9083 mutex_lock(&priv->mutex);
9084 if (priv->config & CFG_STATIC_BSSID ||
9085 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9086 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9087 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9088 } else
9089 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9090
9091 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9092 wrqu->ap_addr.sa_data);
9093 mutex_unlock(&priv->mutex);
9094 return 0;
9095 }
9096
9097 static int ipw_wx_set_essid(struct net_device *dev,
9098 struct iw_request_info *info,
9099 union iwreq_data *wrqu, char *extra)
9100 {
9101 struct ipw_priv *priv = libipw_priv(dev);
9102 int length;
9103 DECLARE_SSID_BUF(ssid);
9104
9105 mutex_lock(&priv->mutex);
9106
9107 if (!wrqu->essid.flags)
9108 {
9109 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9110 ipw_disassociate(priv);
9111 priv->config &= ~CFG_STATIC_ESSID;
9112 ipw_associate(priv);
9113 mutex_unlock(&priv->mutex);
9114 return 0;
9115 }
9116
9117 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9118
9119 priv->config |= CFG_STATIC_ESSID;
9120
9121 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9122 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9123 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9124 mutex_unlock(&priv->mutex);
9125 return 0;
9126 }
9127
9128 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9129 print_ssid(ssid, extra, length), length);
9130
9131 priv->essid_len = length;
9132 memcpy(priv->essid, extra, priv->essid_len);
9133
9134 /* Network configuration changed -- force [re]association */
9135 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9136 if (!ipw_disassociate(priv))
9137 ipw_associate(priv);
9138
9139 mutex_unlock(&priv->mutex);
9140 return 0;
9141 }
9142
9143 static int ipw_wx_get_essid(struct net_device *dev,
9144 struct iw_request_info *info,
9145 union iwreq_data *wrqu, char *extra)
9146 {
9147 struct ipw_priv *priv = libipw_priv(dev);
9148 DECLARE_SSID_BUF(ssid);
9149
9150 /* If we are associated, trying to associate, or have a statically
9151 * configured ESSID then return that; otherwise return ANY */
9152 mutex_lock(&priv->mutex);
9153 if (priv->config & CFG_STATIC_ESSID ||
9154 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9155 IPW_DEBUG_WX("Getting essid: '%s'\n",
9156 print_ssid(ssid, priv->essid, priv->essid_len));
9157 memcpy(extra, priv->essid, priv->essid_len);
9158 wrqu->essid.length = priv->essid_len;
9159 wrqu->essid.flags = 1; /* active */
9160 } else {
9161 IPW_DEBUG_WX("Getting essid: ANY\n");
9162 wrqu->essid.length = 0;
9163 wrqu->essid.flags = 0; /* any */
9164 }
9165 mutex_unlock(&priv->mutex);
9166 return 0;
9167 }
9168
9169 static int ipw_wx_set_nick(struct net_device *dev,
9170 struct iw_request_info *info,
9171 union iwreq_data *wrqu, char *extra)
9172 {
9173 struct ipw_priv *priv = libipw_priv(dev);
9174
9175 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9176 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9177 return -E2BIG;
9178 mutex_lock(&priv->mutex);
9179 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9180 memset(priv->nick, 0, sizeof(priv->nick));
9181 memcpy(priv->nick, extra, wrqu->data.length);
9182 IPW_DEBUG_TRACE("<<\n");
9183 mutex_unlock(&priv->mutex);
9184 return 0;
9185
9186 }
9187
9188 static int ipw_wx_get_nick(struct net_device *dev,
9189 struct iw_request_info *info,
9190 union iwreq_data *wrqu, char *extra)
9191 {
9192 struct ipw_priv *priv = libipw_priv(dev);
9193 IPW_DEBUG_WX("Getting nick\n");
9194 mutex_lock(&priv->mutex);
9195 wrqu->data.length = strlen(priv->nick);
9196 memcpy(extra, priv->nick, wrqu->data.length);
9197 wrqu->data.flags = 1; /* active */
9198 mutex_unlock(&priv->mutex);
9199 return 0;
9200 }
9201
9202 static int ipw_wx_set_sens(struct net_device *dev,
9203 struct iw_request_info *info,
9204 union iwreq_data *wrqu, char *extra)
9205 {
9206 struct ipw_priv *priv = libipw_priv(dev);
9207 int err = 0;
9208
9209 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9210 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9211 mutex_lock(&priv->mutex);
9212
9213 if (wrqu->sens.fixed == 0)
9214 {
9215 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9216 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9217 goto out;
9218 }
9219 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9220 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9221 err = -EINVAL;
9222 goto out;
9223 }
9224
9225 priv->roaming_threshold = wrqu->sens.value;
9226 priv->disassociate_threshold = 3*wrqu->sens.value;
9227 out:
9228 mutex_unlock(&priv->mutex);
9229 return err;
9230 }
9231
9232 static int ipw_wx_get_sens(struct net_device *dev,
9233 struct iw_request_info *info,
9234 union iwreq_data *wrqu, char *extra)
9235 {
9236 struct ipw_priv *priv = libipw_priv(dev);
9237 mutex_lock(&priv->mutex);
9238 wrqu->sens.fixed = 1;
9239 wrqu->sens.value = priv->roaming_threshold;
9240 mutex_unlock(&priv->mutex);
9241
9242 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9243 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9244
9245 return 0;
9246 }
9247
9248 static int ipw_wx_set_rate(struct net_device *dev,
9249 struct iw_request_info *info,
9250 union iwreq_data *wrqu, char *extra)
9251 {
9252 /* TODO: We should use semaphores or locks for access to priv */
9253 struct ipw_priv *priv = libipw_priv(dev);
9254 u32 target_rate = wrqu->bitrate.value;
9255 u32 fixed, mask;
9256
9257 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9258 /* value = X, fixed = 1 means only rate X */
9259 /* value = X, fixed = 0 means all rates lower equal X */
9260
9261 if (target_rate == -1) {
9262 fixed = 0;
9263 mask = LIBIPW_DEFAULT_RATES_MASK;
9264 /* Now we should reassociate */
9265 goto apply;
9266 }
9267
9268 mask = 0;
9269 fixed = wrqu->bitrate.fixed;
9270
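/* Walk the rates in ascending order: with 'fixed' clear, every rate up
 * to and including the target is accumulated into the mask; with
 * 'fixed' set, only the exact matching rate is added before 'apply'. */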
9271 if (target_rate == 1000000 || !fixed)
9272 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9273 if (target_rate == 1000000)
9274 goto apply;
9275
9276 if (target_rate == 2000000 || !fixed)
9277 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9278 if (target_rate == 2000000)
9279 goto apply;
9280
9281 if (target_rate == 5500000 || !fixed)
9282 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9283 if (target_rate == 5500000)
9284 goto apply;
9285
9286 if (target_rate == 6000000 || !fixed)
9287 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9288 if (target_rate == 6000000)
9289 goto apply;
9290
9291 if (target_rate == 9000000 || !fixed)
9292 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9293 if (target_rate == 9000000)
9294 goto apply;
9295
9296 if (target_rate == 11000000 || !fixed)
9297 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9298 if (target_rate == 11000000)
9299 goto apply;
9300
9301 if (target_rate == 12000000 || !fixed)
9302 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9303 if (target_rate == 12000000)
9304 goto apply;
9305
9306 if (target_rate == 18000000 || !fixed)
9307 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9308 if (target_rate == 18000000)
9309 goto apply;
9310
9311 if (target_rate == 24000000 || !fixed)
9312 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9313 if (target_rate == 24000000)
9314 goto apply;
9315
9316 if (target_rate == 36000000 || !fixed)
9317 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9318 if (target_rate == 36000000)
9319 goto apply;
9320
9321 if (target_rate == 48000000 || !fixed)
9322 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9323 if (target_rate == 48000000)
9324 goto apply;
9325
9326 if (target_rate == 54000000 || !fixed)
9327 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9328 if (target_rate == 54000000)
9329 goto apply;
9330
9331 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9332 return -EINVAL;
9333
9334 apply:
9335 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9336 mask, fixed ? "fixed" : "sub-rates");
9337 mutex_lock(&priv->mutex);
9338 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9339 priv->config &= ~CFG_FIXED_RATE;
9340 ipw_set_fixed_rate(priv, priv->ieee->mode);
9341 } else
9342 priv->config |= CFG_FIXED_RATE;
9343
9344 if (priv->rates_mask == mask) {
9345 IPW_DEBUG_WX("Mask set to current mask.\n");
9346 mutex_unlock(&priv->mutex);
9347 return 0;
9348 }
9349
9350 priv->rates_mask = mask;
9351
9352 /* Network configuration changed -- force [re]association */
9353 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9354 if (!ipw_disassociate(priv))
9355 ipw_associate(priv);
9356
9357 mutex_unlock(&priv->mutex);
9358 return 0;
9359 }
9360
9361 static int ipw_wx_get_rate(struct net_device *dev,
9362 struct iw_request_info *info,
9363 union iwreq_data *wrqu, char *extra)
9364 {
9365 struct ipw_priv *priv = libipw_priv(dev);
9366 mutex_lock(&priv->mutex);
9367 wrqu->bitrate.value = priv->last_rate;
9368 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9369 mutex_unlock(&priv->mutex);
9370 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9371 return 0;
9372 }
9373
9374 static int ipw_wx_set_rts(struct net_device *dev,
9375 struct iw_request_info *info,
9376 union iwreq_data *wrqu, char *extra)
9377 {
9378 struct ipw_priv *priv = libipw_priv(dev);
9379 mutex_lock(&priv->mutex);
9380 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9381 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9382 else {
9383 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9384 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9385 mutex_unlock(&priv->mutex);
9386 return -EINVAL;
9387 }
9388 priv->rts_threshold = wrqu->rts.value;
9389 }
9390
9391 ipw_send_rts_threshold(priv, priv->rts_threshold);
9392 mutex_unlock(&priv->mutex);
9393 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9394 return 0;
9395 }
9396
9397 static int ipw_wx_get_rts(struct net_device *dev,
9398 struct iw_request_info *info,
9399 union iwreq_data *wrqu, char *extra)
9400 {
9401 struct ipw_priv *priv = libipw_priv(dev);
9402 mutex_lock(&priv->mutex);
9403 wrqu->rts.value = priv->rts_threshold;
9404 wrqu->rts.fixed = 0; /* no auto select */
9405 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9406 mutex_unlock(&priv->mutex);
9407 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9408 return 0;
9409 }
9410
9411 static int ipw_wx_set_txpow(struct net_device *dev,
9412 struct iw_request_info *info,
9413 union iwreq_data *wrqu, char *extra)
9414 {
9415 struct ipw_priv *priv = libipw_priv(dev);
9416 int err = 0;
9417
9418 mutex_lock(&priv->mutex);
9419 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9420 err = -EINPROGRESS;
9421 goto out;
9422 }
9423
9424 if (!wrqu->power.fixed)
9425 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9426
9427 if (wrqu->power.flags != IW_TXPOW_DBM) {
9428 err = -EINVAL;
9429 goto out;
9430 }
9431
9432 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9433 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9434 err = -EINVAL;
9435 goto out;
9436 }
9437
9438 priv->tx_power = wrqu->power.value;
9439 err = ipw_set_tx_power(priv);
9440 out:
9441 mutex_unlock(&priv->mutex);
9442 return err;
9443 }
9444
9445 static int ipw_wx_get_txpow(struct net_device *dev,
9446 struct iw_request_info *info,
9447 union iwreq_data *wrqu, char *extra)
9448 {
9449 struct ipw_priv *priv = libipw_priv(dev);
9450 mutex_lock(&priv->mutex);
9451 wrqu->power.value = priv->tx_power;
9452 wrqu->power.fixed = 1;
9453 wrqu->power.flags = IW_TXPOW_DBM;
9454 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9455 mutex_unlock(&priv->mutex);
9456
9457 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9458 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9459
9460 return 0;
9461 }
9462
9463 static int ipw_wx_set_frag(struct net_device *dev,
9464 struct iw_request_info *info,
9465 union iwreq_data *wrqu, char *extra)
9466 {
9467 struct ipw_priv *priv = libipw_priv(dev);
9468 mutex_lock(&priv->mutex);
9469 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9470 priv->ieee->fts = DEFAULT_FTS;
9471 else {
9472 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9473 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9474 mutex_unlock(&priv->mutex);
9475 return -EINVAL;
9476 }
9477
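/* Force an even fragmentation threshold by clearing the low bit. */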
9478 priv->ieee->fts = wrqu->frag.value & ~0x1;
9479 }
9480
9481 ipw_send_frag_threshold(priv, wrqu->frag.value);
9482 mutex_unlock(&priv->mutex);
9483 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9484 return 0;
9485 }
9486
9487 static int ipw_wx_get_frag(struct net_device *dev,
9488 struct iw_request_info *info,
9489 union iwreq_data *wrqu, char *extra)
9490 {
9491 struct ipw_priv *priv = libipw_priv(dev);
9492 mutex_lock(&priv->mutex);
9493 wrqu->frag.value = priv->ieee->fts;
9494 wrqu->frag.fixed = 0; /* no auto select */
9495 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9496 mutex_unlock(&priv->mutex);
9497 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9498
9499 return 0;
9500 }
9501
9502 static int ipw_wx_set_retry(struct net_device *dev,
9503 struct iw_request_info *info,
9504 union iwreq_data *wrqu, char *extra)
9505 {
9506 struct ipw_priv *priv = libipw_priv(dev);
9507
9508 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9509 return -EINVAL;
9510
9511 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9512 return 0;
9513
9514 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9515 return -EINVAL;
9516
9517 mutex_lock(&priv->mutex);
9518 if (wrqu->retry.flags & IW_RETRY_SHORT)
9519 priv->short_retry_limit = (u8) wrqu->retry.value;
9520 else if (wrqu->retry.flags & IW_RETRY_LONG)
9521 priv->long_retry_limit = (u8) wrqu->retry.value;
9522 else {
9523 priv->short_retry_limit = (u8) wrqu->retry.value;
9524 priv->long_retry_limit = (u8) wrqu->retry.value;
9525 }
9526
9527 ipw_send_retry_limit(priv, priv->short_retry_limit,
9528 priv->long_retry_limit);
9529 mutex_unlock(&priv->mutex);
9530 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9531 priv->short_retry_limit, priv->long_retry_limit);
9532 return 0;
9533 }
9534
9535 static int ipw_wx_get_retry(struct net_device *dev,
9536 struct iw_request_info *info,
9537 union iwreq_data *wrqu, char *extra)
9538 {
9539 struct ipw_priv *priv = libipw_priv(dev);
9540
9541 mutex_lock(&priv->mutex);
9542 wrqu->retry.disabled = 0;
9543
9544 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9545 mutex_unlock(&priv->mutex);
9546 return -EINVAL;
9547 }
9548
9549 if (wrqu->retry.flags & IW_RETRY_LONG) {
9550 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9551 wrqu->retry.value = priv->long_retry_limit;
9552 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9553 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9554 wrqu->retry.value = priv->short_retry_limit;
9555 } else {
9556 wrqu->retry.flags = IW_RETRY_LIMIT;
9557 wrqu->retry.value = priv->short_retry_limit;
9558 }
9559 mutex_unlock(&priv->mutex);
9560
9561 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9562
9563 return 0;
9564 }
9565
9566 static int ipw_wx_set_scan(struct net_device *dev,
9567 struct iw_request_info *info,
9568 union iwreq_data *wrqu, char *extra)
9569 {
9570 struct ipw_priv *priv = libipw_priv(dev);
9571 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9572 struct delayed_work *work = NULL;
9573
9574 mutex_lock(&priv->mutex);
9575
9576 priv->user_requested_scan = 1;
9577
9578 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9579 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9580 int len = min((int)req->essid_len,
9581 (int)sizeof(priv->direct_scan_ssid));
9582 memcpy(priv->direct_scan_ssid, req->essid, len);
9583 priv->direct_scan_ssid_len = len;
9584 work = &priv->request_direct_scan;
9585 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9586 work = &priv->request_passive_scan;
9587 }
9588 } else {
9589 /* Normal active broadcast scan */
9590 work = &priv->request_scan;
9591 }
9592
9593 mutex_unlock(&priv->mutex);
9594
9595 IPW_DEBUG_WX("Start scan\n");
9596
9597 schedule_delayed_work(work, 0);
9598
9599 return 0;
9600 }
9601
9602 static int ipw_wx_get_scan(struct net_device *dev,
9603 struct iw_request_info *info,
9604 union iwreq_data *wrqu, char *extra)
9605 {
9606 struct ipw_priv *priv = libipw_priv(dev);
9607 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9608 }
9609
9610 static int ipw_wx_set_encode(struct net_device *dev,
9611 struct iw_request_info *info,
9612 union iwreq_data *wrqu, char *key)
9613 {
9614 struct ipw_priv *priv = libipw_priv(dev);
9615 int ret;
9616 u32 cap = priv->capability;
9617
9618 mutex_lock(&priv->mutex);
9619 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9620
9621 /* In IBSS mode, we need to notify the firmware to update
9622 * the beacon info after we changed the capability. */
9623 if (cap != priv->capability &&
9624 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9625 priv->status & STATUS_ASSOCIATED)
9626 ipw_disassociate(priv);
9627
9628 mutex_unlock(&priv->mutex);
9629 return ret;
9630 }
9631
9632 static int ipw_wx_get_encode(struct net_device *dev,
9633 struct iw_request_info *info,
9634 union iwreq_data *wrqu, char *key)
9635 {
9636 struct ipw_priv *priv = libipw_priv(dev);
9637 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9638 }
9639
9640 static int ipw_wx_set_power(struct net_device *dev,
9641 struct iw_request_info *info,
9642 union iwreq_data *wrqu, char *extra)
9643 {
9644 struct ipw_priv *priv = libipw_priv(dev);
9645 int err;
9646 mutex_lock(&priv->mutex);
9647 if (wrqu->power.disabled) {
9648 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9649 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9650 if (err) {
9651 IPW_DEBUG_WX("failed setting power mode.\n");
9652 mutex_unlock(&priv->mutex);
9653 return err;
9654 }
9655 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9656 mutex_unlock(&priv->mutex);
9657 return 0;
9658 }
9659
9660 switch (wrqu->power.flags & IW_POWER_MODE) {
9661 case IW_POWER_ON: /* If not specified */
9662 case IW_POWER_MODE: /* If set all mask */
9663 case IW_POWER_ALL_R: /* If explicitly state all */
9664 break;
9665 default: /* Otherwise we don't support it */
9666 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9667 wrqu->power.flags);
9668 mutex_unlock(&priv->mutex);
9669 return -EOPNOTSUPP;
9670 }
9671
9672 /* If the user hasn't specified a power management mode yet, default
9673 * to BATTERY */
9674 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9675 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9676 else
9677 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9678
9679 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9680 if (err) {
9681 IPW_DEBUG_WX("failed setting power mode.\n");
9682 mutex_unlock(&priv->mutex);
9683 return err;
9684 }
9685
9686 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9687 mutex_unlock(&priv->mutex);
9688 return 0;
9689 }
9690
9691 static int ipw_wx_get_power(struct net_device *dev,
9692 struct iw_request_info *info,
9693 union iwreq_data *wrqu, char *extra)
9694 {
9695 struct ipw_priv *priv = libipw_priv(dev);
9696 mutex_lock(&priv->mutex);
9697 if (!(priv->power_mode & IPW_POWER_ENABLED))
9698 wrqu->power.disabled = 1;
9699 else
9700 wrqu->power.disabled = 0;
9701
9702 mutex_unlock(&priv->mutex);
9703 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9704
9705 return 0;
9706 }
9707
9708 static int ipw_wx_set_powermode(struct net_device *dev,
9709 struct iw_request_info *info,
9710 union iwreq_data *wrqu, char *extra)
9711 {
9712 struct ipw_priv *priv = libipw_priv(dev);
9713 int mode = *(int *)extra;
9714 int err;
9715
9716 mutex_lock(&priv->mutex);
9717 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9718 mode = IPW_POWER_AC;
9719
9720 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9721 err = ipw_send_power_mode(priv, mode);
9722 if (err) {
9723 IPW_DEBUG_WX("failed setting power mode.\n");
9724 mutex_unlock(&priv->mutex);
9725 return err;
9726 }
9727 priv->power_mode = IPW_POWER_ENABLED | mode;
9728 }
9729 mutex_unlock(&priv->mutex);
9730 return 0;
9731 }
9732
9733 #define MAX_WX_STRING 80
9734 static int ipw_wx_get_powermode(struct net_device *dev,
9735 struct iw_request_info *info,
9736 union iwreq_data *wrqu, char *extra)
9737 {
9738 struct ipw_priv *priv = libipw_priv(dev);
9739 int level = IPW_POWER_LEVEL(priv->power_mode);
9740 char *p = extra;
9741
9742 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9743
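/* Levels other than AC and BATTERY index the timeout/period tables
 * defined above (entry level - 1). */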
9744 switch (level) {
9745 case IPW_POWER_AC:
9746 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9747 break;
9748 case IPW_POWER_BATTERY:
9749 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9750 break;
9751 default:
9752 p += snprintf(p, MAX_WX_STRING - (p - extra),
9753 "(Timeout %dms, Period %dms)",
9754 timeout_duration[level - 1] / 1000,
9755 period_duration[level - 1] / 1000);
9756 }
9757
9758 if (!(priv->power_mode & IPW_POWER_ENABLED))
9759 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9760
9761 wrqu->data.length = p - extra + 1;
9762
9763 return 0;
9764 }
9765
9766 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9767 struct iw_request_info *info,
9768 union iwreq_data *wrqu, char *extra)
9769 {
9770 struct ipw_priv *priv = libipw_priv(dev);
9771 int mode = *(int *)extra;
9772 u8 band = 0, modulation = 0;
9773
9774 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9775 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9776 return -EINVAL;
9777 }
9778 mutex_lock(&priv->mutex);
9779 if (priv->adapter == IPW_2915ABG) {
9780 priv->ieee->abg_true = 1;
9781 if (mode & IEEE_A) {
9782 band |= LIBIPW_52GHZ_BAND;
9783 modulation |= LIBIPW_OFDM_MODULATION;
9784 } else
9785 priv->ieee->abg_true = 0;
9786 } else {
9787 if (mode & IEEE_A) {
9788 IPW_WARNING("Attempt to set 2200BG into "
9789 "802.11a mode\n");
9790 mutex_unlock(&priv->mutex);
9791 return -EINVAL;
9792 }
9793
9794 priv->ieee->abg_true = 0;
9795 }
9796
9797 if (mode & IEEE_B) {
9798 band |= LIBIPW_24GHZ_BAND;
9799 modulation |= LIBIPW_CCK_MODULATION;
9800 } else
9801 priv->ieee->abg_true = 0;
9802
9803 if (mode & IEEE_G) {
9804 band |= LIBIPW_24GHZ_BAND;
9805 modulation |= LIBIPW_OFDM_MODULATION;
9806 } else
9807 priv->ieee->abg_true = 0;
9808
9809 priv->ieee->mode = mode;
9810 priv->ieee->freq_band = band;
9811 priv->ieee->modulation = modulation;
9812 init_supported_rates(priv, &priv->rates);
9813
9814 /* Network configuration changed -- force [re]association */
9815 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9816 if (!ipw_disassociate(priv)) {
9817 ipw_send_supported_rates(priv, &priv->rates);
9818 ipw_associate(priv);
9819 }
9820
9821 /* Update the band LEDs */
9822 ipw_led_band_on(priv);
9823
9824 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9825 mode & IEEE_A ? 'a' : '.',
9826 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9827 mutex_unlock(&priv->mutex);
9828 return 0;
9829 }
9830
9831 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9832 struct iw_request_info *info,
9833 union iwreq_data *wrqu, char *extra)
9834 {
9835 struct ipw_priv *priv = libipw_priv(dev);
9836 mutex_lock(&priv->mutex);
9837 switch (priv->ieee->mode) {
9838 case IEEE_A:
9839 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9840 break;
9841 case IEEE_B:
9842 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9843 break;
9844 case IEEE_A | IEEE_B:
9845 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9846 break;
9847 case IEEE_G:
9848 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9849 break;
9850 case IEEE_A | IEEE_G:
9851 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9852 break;
9853 case IEEE_B | IEEE_G:
9854 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9855 break;
9856 case IEEE_A | IEEE_B | IEEE_G:
9857 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9858 break;
9859 default:
9860 strncpy(extra, "unknown", MAX_WX_STRING);
9861 break;
9862 }
9863
9864 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9865
9866 wrqu->data.length = strlen(extra) + 1;
9867 mutex_unlock(&priv->mutex);
9868
9869 return 0;
9870 }
9871
9872 static int ipw_wx_set_preamble(struct net_device *dev,
9873 struct iw_request_info *info,
9874 union iwreq_data *wrqu, char *extra)
9875 {
9876 struct ipw_priv *priv = libipw_priv(dev);
9877 int mode = *(int *)extra;
9878 mutex_lock(&priv->mutex);
9879 /* Switching from SHORT -> LONG requires a disassociation */
9880 if (mode == 1) {
9881 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9882 priv->config |= CFG_PREAMBLE_LONG;
9883
9884 /* Network configuration changed -- force [re]association */
9885 IPW_DEBUG_ASSOC
9886 ("[re]association triggered due to preamble change.\n");
9887 if (!ipw_disassociate(priv))
9888 ipw_associate(priv);
9889 }
9890 goto done;
9891 }
9892
9893 if (mode == 0) {
9894 priv->config &= ~CFG_PREAMBLE_LONG;
9895 goto done;
9896 }
9897 mutex_unlock(&priv->mutex);
9898 return -EINVAL;
9899
9900 done:
9901 mutex_unlock(&priv->mutex);
9902 return 0;
9903 }
9904
9905 static int ipw_wx_get_preamble(struct net_device *dev,
9906 struct iw_request_info *info,
9907 union iwreq_data *wrqu, char *extra)
9908 {
9909 struct ipw_priv *priv = libipw_priv(dev);
9910 mutex_lock(&priv->mutex);
9911 if (priv->config & CFG_PREAMBLE_LONG)
9912 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9913 else
9914 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9915 mutex_unlock(&priv->mutex);
9916 return 0;
9917 }
9918
9919 #ifdef CONFIG_IPW2200_MONITOR
9920 static int ipw_wx_set_monitor(struct net_device *dev,
9921 struct iw_request_info *info,
9922 union iwreq_data *wrqu, char *extra)
9923 {
9924 struct ipw_priv *priv = libipw_priv(dev);
9925 int *parms = (int *)extra;
9926 int enable = (parms[0] > 0);
9927 mutex_lock(&priv->mutex);
9928 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9929 if (enable) {
9930 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9931 #ifdef CONFIG_IPW2200_RADIOTAP
9932 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9933 #else
9934 priv->net_dev->type = ARPHRD_IEEE80211;
9935 #endif
9936 schedule_work(&priv->adapter_restart);
9937 }
9938
9939 ipw_set_channel(priv, parms[1]);
9940 } else {
9941 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9942 mutex_unlock(&priv->mutex);
9943 return 0;
9944 }
9945 priv->net_dev->type = ARPHRD_ETHER;
9946 schedule_work(&priv->adapter_restart);
9947 }
9948 mutex_unlock(&priv->mutex);
9949 return 0;
9950 }
9951
9952 #endif /* CONFIG_IPW2200_MONITOR */
9953
9954 static int ipw_wx_reset(struct net_device *dev,
9955 struct iw_request_info *info,
9956 union iwreq_data *wrqu, char *extra)
9957 {
9958 struct ipw_priv *priv = libipw_priv(dev);
9959 IPW_DEBUG_WX("RESET\n");
9960 schedule_work(&priv->adapter_restart);
9961 return 0;
9962 }
9963
9964 static int ipw_wx_sw_reset(struct net_device *dev,
9965 struct iw_request_info *info,
9966 union iwreq_data *wrqu, char *extra)
9967 {
9968 struct ipw_priv *priv = libipw_priv(dev);
9969 union iwreq_data wrqu_sec = {
9970 .encoding = {
9971 .flags = IW_ENCODE_DISABLED,
9972 },
9973 };
9974 int ret;
9975
9976 IPW_DEBUG_WX("SW_RESET\n");
9977
9978 mutex_lock(&priv->mutex);
9979
9980 ret = ipw_sw_reset(priv, 2);
9981 if (!ret) {
9982 free_firmware();
9983 ipw_adapter_restart(priv);
9984 }
9985
9986 /* The SW reset bit might have been toggled on by the 'disable'
9987 * module parameter, so take appropriate action */
9988 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9989
9990 mutex_unlock(&priv->mutex);
9991 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9992 mutex_lock(&priv->mutex);
9993
9994 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9995 /* Configuration likely changed -- force [re]association */
9996 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9997 "reset.\n");
9998 if (!ipw_disassociate(priv))
9999 ipw_associate(priv);
10000 }
10001
10002 mutex_unlock(&priv->mutex);
10003
10004 return 0;
10005 }
10006
10007 /* Rebase the WE IOCTLs to zero for the handler array */
10008 static iw_handler ipw_wx_handlers[] = {
10009 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10010 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10011 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10012 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10013 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10014 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10015 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10016 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10017 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10018 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10019 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10020 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10021 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10022 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10023 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10024 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10025 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10026 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10027 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10028 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10029 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10030 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10031 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10032 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10033 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10034 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10035 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10036 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10037 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10038 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10039 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10040 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10041 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10042 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10043 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10044 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10045 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10046 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10047 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10048 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10049 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10050 };
10051
10052 enum {
10053 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10054 IPW_PRIV_GET_POWER,
10055 IPW_PRIV_SET_MODE,
10056 IPW_PRIV_GET_MODE,
10057 IPW_PRIV_SET_PREAMBLE,
10058 IPW_PRIV_GET_PREAMBLE,
10059 IPW_PRIV_RESET,
10060 IPW_PRIV_SW_RESET,
10061 #ifdef CONFIG_IPW2200_MONITOR
10062 IPW_PRIV_SET_MONITOR,
10063 #endif
10064 };
10065
10066 static struct iw_priv_args ipw_priv_args[] = {
10067 {
10068 .cmd = IPW_PRIV_SET_POWER,
10069 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10070 .name = "set_power"},
10071 {
10072 .cmd = IPW_PRIV_GET_POWER,
10073 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10074 .name = "get_power"},
10075 {
10076 .cmd = IPW_PRIV_SET_MODE,
10077 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10078 .name = "set_mode"},
10079 {
10080 .cmd = IPW_PRIV_GET_MODE,
10081 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10082 .name = "get_mode"},
10083 {
10084 .cmd = IPW_PRIV_SET_PREAMBLE,
10085 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10086 .name = "set_preamble"},
10087 {
10088 .cmd = IPW_PRIV_GET_PREAMBLE,
10089 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10090 .name = "get_preamble"},
10091 {
10092 IPW_PRIV_RESET,
10093 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10094 {
10095 IPW_PRIV_SW_RESET,
10096 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10097 #ifdef CONFIG_IPW2200_MONITOR
10098 {
10099 IPW_PRIV_SET_MONITOR,
10100 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10101 #endif /* CONFIG_IPW2200_MONITOR */
10102 };
10103
10104 static iw_handler ipw_priv_handler[] = {
10105 ipw_wx_set_powermode,
10106 ipw_wx_get_powermode,
10107 ipw_wx_set_wireless_mode,
10108 ipw_wx_get_wireless_mode,
10109 ipw_wx_set_preamble,
10110 ipw_wx_get_preamble,
10111 ipw_wx_reset,
10112 ipw_wx_sw_reset,
10113 #ifdef CONFIG_IPW2200_MONITOR
10114 ipw_wx_set_monitor,
10115 #endif
10116 };
10117
10118 static struct iw_handler_def ipw_wx_handler_def = {
10119 .standard = ipw_wx_handlers,
10120 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10121 .num_private = ARRAY_SIZE(ipw_priv_handler),
10122 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10123 .private = ipw_priv_handler,
10124 .private_args = ipw_priv_args,
10125 .get_wireless_stats = ipw_get_wireless_stats,
10126 };
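/* Example usage (illustrative only, interface name assumed): once these
 * handlers are registered, the private ioctls can be driven from
 * userspace with iwpriv, e.g.:
 *
 *   iwpriv eth1 set_power 5     # select power-save level 5
 *   iwpriv eth1 get_mode        # reports e.g. "802.11bg (6)"
 *   iwpriv eth1 monitor 1 6     # enable monitor mode on channel 6
 */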
10127
10128 /*
10129 * Get wireless statistics.
10130 * Called by /proc/net/wireless
10131 * Also called by SIOCGIWSTATS
10132 */
10133 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10134 {
10135 struct ipw_priv *priv = libipw_priv(dev);
10136 struct iw_statistics *wstats;
10137
10138 wstats = &priv->wstats;
10139
10140 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10141 * netdev->get_wireless_stats seems to be called before fw is
10142 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10143 * and associated; if not associated, the values are all meaningless
10144 * anyway, so set them all to 0 and mark them INVALID */
10145 if (!(priv->status & STATUS_ASSOCIATED)) {
10146 wstats->miss.beacon = 0;
10147 wstats->discard.retries = 0;
10148 wstats->qual.qual = 0;
10149 wstats->qual.level = 0;
10150 wstats->qual.noise = 0;
10151 wstats->qual.updated = 7;
10152 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10153 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10154 return wstats;
10155 }
10156
10157 wstats->qual.qual = priv->quality;
10158 wstats->qual.level = priv->exp_avg_rssi;
10159 wstats->qual.noise = priv->exp_avg_noise;
10160 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10161 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10162
10163 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10164 wstats->discard.retries = priv->last_tx_failures;
10165 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10166
10167 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10168 goto fail_get_ordinal;
10169 wstats->discard.retries += tx_retry; */
10170
10171 return wstats;
10172 }
10173
10174 /* net device stuff */
10175
10176 static void init_sys_config(struct ipw_sys_config *sys_config)
10177 {
10178 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10179 sys_config->bt_coexistence = 0;
10180 sys_config->answer_broadcast_ssid_probe = 0;
10181 sys_config->accept_all_data_frames = 0;
10182 sys_config->accept_non_directed_frames = 1;
10183 sys_config->exclude_unicast_unencrypted = 0;
10184 sys_config->disable_unicast_decryption = 1;
10185 sys_config->exclude_multicast_unencrypted = 0;
10186 sys_config->disable_multicast_decryption = 1;
10187 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10188 antenna = CFG_SYS_ANTENNA_BOTH;
10189 sys_config->antenna_diversity = antenna;
10190 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10191 sys_config->dot11g_auto_detection = 0;
10192 sys_config->enable_cts_to_self = 0;
10193 sys_config->bt_coexist_collision_thr = 0;
10194 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10195 sys_config->silence_threshold = 0x1e;
10196 }
10197
10198 static int ipw_net_open(struct net_device *dev)
10199 {
10200 IPW_DEBUG_INFO("dev->open\n");
10201 netif_start_queue(dev);
10202 return 0;
10203 }
10204
10205 static int ipw_net_stop(struct net_device *dev)
10206 {
10207 IPW_DEBUG_INFO("dev->close\n");
10208 netif_stop_queue(dev);
10209 return 0;
10210 }
10211
10212 /*
10213 todo:
10214
10215 modify to send one TFD per fragment instead of using chunking; otherwise
10216 we would need to heavily modify libipw_skb_to_txb().
10217 */
10218
10219 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10220 int pri)
10221 {
10222 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10223 txb->fragments[0]->data;
10224 int i = 0;
10225 struct tfd_frame *tfd;
10226 #ifdef CONFIG_IPW2200_QOS
10227 int tx_id = ipw_get_tx_queue_number(priv, pri);
10228 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10229 #else
10230 struct clx2_tx_queue *txq = &priv->txq[0];
10231 #endif
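/* With CONFIG_IPW2200_QOS the frame priority selects which tx queue is
 * used; otherwise all traffic goes through queue 0. */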
10232 struct clx2_queue *q = &txq->q;
10233 u8 id, hdr_len, unicast;
10234 int fc;
10235
10236 if (!(priv->status & STATUS_ASSOCIATED))
10237 goto drop;
10238
10239 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10240 switch (priv->ieee->iw_mode) {
10241 case IW_MODE_ADHOC:
10242 unicast = !is_multicast_ether_addr(hdr->addr1);
10243 id = ipw_find_station(priv, hdr->addr1);
10244 if (id == IPW_INVALID_STATION) {
10245 id = ipw_add_station(priv, hdr->addr1);
10246 if (id == IPW_INVALID_STATION) {
10247 IPW_WARNING("Attempt to send data to "
10248 "invalid cell: %pM\n",
10249 hdr->addr1);
10250 goto drop;
10251 }
10252 }
10253 break;
10254
10255 case IW_MODE_INFRA:
10256 default:
10257 unicast = !is_multicast_ether_addr(hdr->addr3);
10258 id = 0;
10259 break;
10260 }
10261
10262 tfd = &txq->bd[q->first_empty];
10263 txq->txb[q->first_empty] = txb;
10264 memset(tfd, 0, sizeof(*tfd));
10265 tfd->u.data.station_number = id;
10266
10267 tfd->control_flags.message_type = TX_FRAME_TYPE;
10268 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10269
10270 tfd->u.data.cmd_id = DINO_CMD_TX;
10271 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10272
10273 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10274 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10275 else
10276 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10277
10278 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10279 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10280
10281 fc = le16_to_cpu(hdr->frame_ctl);
10282 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10283
10284 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10285
10286 if (likely(unicast))
10287 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10288
10289 if (txb->encrypted && !priv->ieee->host_encrypt) {
10290 switch (priv->ieee->sec.level) {
10291 case SEC_LEVEL_3:
10292 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10293 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10294 /* XXX: ACK flag must be set for CCMP even if it
10295 * is a multicast/broadcast packet, because CCMP
10296 * group communication encrypted by GTK is
10297 * actually done by the AP. */
10298 if (!unicast)
10299 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10300
10301 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10302 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10303 tfd->u.data.key_index = 0;
10304 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10305 break;
10306 case SEC_LEVEL_2:
10307 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10308 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10309 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10310 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10311 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10312 break;
10313 case SEC_LEVEL_1:
10314 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10315 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10316 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10317 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10318 40)
10319 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10320 else
10321 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10322 break;
10323 case SEC_LEVEL_0:
10324 break;
10325 default:
10326 printk(KERN_ERR "Unknown security level %d\n",
10327 priv->ieee->sec.level);
10328 break;
10329 }
10330 } else
10331 /* No hardware encryption */
10332 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10333
10334 #ifdef CONFIG_IPW2200_QOS
10335 if (fc & IEEE80211_STYPE_QOS_DATA)
10336 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10337 #endif /* CONFIG_IPW2200_QOS */
10338
10339 /* payload */
10340 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10341 txb->nr_frags));
10342 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10343 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10344 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10345 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10346 i, le32_to_cpu(tfd->u.data.num_chunks),
10347 txb->fragments[i]->len - hdr_len);
10348 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10349 i, tfd->u.data.num_chunks,
10350 txb->fragments[i]->len - hdr_len);
10351 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10352 txb->fragments[i]->len - hdr_len);
10353
10354 tfd->u.data.chunk_ptr[i] =
10355 cpu_to_le32(pci_map_single
10356 (priv->pci_dev,
10357 txb->fragments[i]->data + hdr_len,
10358 txb->fragments[i]->len - hdr_len,
10359 PCI_DMA_TODEVICE));
10360 tfd->u.data.chunk_len[i] =
10361 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10362 }
10363
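/* If there were more fragments than chunk slots, coalesce the remaining
 * fragment payloads into one skb that occupies the last chunk. */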
10364 if (i != txb->nr_frags) {
10365 struct sk_buff *skb;
10366 u16 remaining_bytes = 0;
10367 int j;
10368
10369 for (j = i; j < txb->nr_frags; j++)
10370 remaining_bytes += txb->fragments[j]->len - hdr_len;
10371
10372 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10373 remaining_bytes);
10374 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10375 if (skb != NULL) {
10376 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10377 for (j = i; j < txb->nr_frags; j++) {
10378 int size = txb->fragments[j]->len - hdr_len;
10379
10380 printk(KERN_INFO "Adding frag %d %d...\n",
10381 j, size);
10382 memcpy(skb_put(skb, size),
10383 txb->fragments[j]->data + hdr_len, size);
10384 }
10385 dev_kfree_skb_any(txb->fragments[i]);
10386 txb->fragments[i] = skb;
10387 tfd->u.data.chunk_ptr[i] =
10388 cpu_to_le32(pci_map_single
10389 (priv->pci_dev, skb->data,
10390 remaining_bytes,
10391 PCI_DMA_TODEVICE));
10392
10393 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10394 }
10395 }
10396
10397 /* kick DMA */
10398 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10399 ipw_write32(priv, q->reg_w, q->first_empty);
10400
10401 if (ipw_tx_queue_space(q) < q->high_mark)
10402 netif_stop_queue(priv->net_dev);
10403
10404 return NETDEV_TX_OK;
10405
10406 drop:
10407 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10408 libipw_txb_free(txb);
10409 return NETDEV_TX_OK;
10410 }
10411
10412 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10413 {
10414 struct ipw_priv *priv = libipw_priv(dev);
10415 #ifdef CONFIG_IPW2200_QOS
10416 int tx_id = ipw_get_tx_queue_number(priv, pri);
10417 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10418 #else
10419 struct clx2_tx_queue *txq = &priv->txq[0];
10420 #endif /* CONFIG_IPW2200_QOS */
10421
10422 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10423 return 1;
10424
10425 return 0;
10426 }
10427
10428 #ifdef CONFIG_IPW2200_PROMISCUOUS
10429 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10430 struct libipw_txb *txb)
10431 {
10432 struct libipw_rx_stats dummystats;
10433 struct ieee80211_hdr *hdr;
10434 u8 n;
10435 u16 filter = priv->prom_priv->filter;
10436 int hdr_only = 0;
10437
10438 if (filter & IPW_PROM_NO_TX)
10439 return;
10440
10441 memset(&dummystats, 0, sizeof(dummystats));
10442
10443 /* Filtering of fragment chains is done against the first fragment */
10444 hdr = (void *)txb->fragments[0]->data;
10445 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10446 if (filter & IPW_PROM_NO_MGMT)
10447 return;
10448 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10449 hdr_only = 1;
10450 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10451 if (filter & IPW_PROM_NO_CTL)
10452 return;
10453 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10454 hdr_only = 1;
10455 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10456 if (filter & IPW_PROM_NO_DATA)
10457 return;
10458 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10459 hdr_only = 1;
10460 }
10461
10462 for (n = 0; n < txb->nr_frags; ++n) {
10463 struct sk_buff *src = txb->fragments[n];
10464 struct sk_buff *dst;
10465 struct ieee80211_radiotap_header *rt_hdr;
10466 int len;
10467
10468 if (hdr_only) {
10469 hdr = (void *)src->data;
10470 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10471 } else
10472 len = src->len;
10473
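/* Reserve room for the radiotap header plus two 16-bit fields
 * (channel frequency and channel flags) ahead of the copied frame. */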
10474 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10475 if (!dst)
10476 continue;
10477
10478 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10479
10480 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10481 rt_hdr->it_pad = 0;
10482 rt_hdr->it_present = 0; /* after all, it's just an idea */
10483 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10484
10485 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10486 ieee80211chan2mhz(priv->channel));
10487 if (priv->channel > 14) /* 802.11a */
10488 *(__le16*)skb_put(dst, sizeof(u16)) =
10489 cpu_to_le16(IEEE80211_CHAN_OFDM |
10490 IEEE80211_CHAN_5GHZ);
10491 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10492 *(__le16*)skb_put(dst, sizeof(u16)) =
10493 cpu_to_le16(IEEE80211_CHAN_CCK |
10494 IEEE80211_CHAN_2GHZ);
10495 else /* 802.11g */
10496 *(__le16*)skb_put(dst, sizeof(u16)) =
10497 cpu_to_le16(IEEE80211_CHAN_OFDM |
10498 IEEE80211_CHAN_2GHZ);
10499
10500 rt_hdr->it_len = cpu_to_le16(dst->len);
10501
10502 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10503
10504 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10505 dev_kfree_skb_any(dst);
10506 }
10507 }
10508 #endif
10509
10510 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10511 struct net_device *dev, int pri)
10512 {
10513 struct ipw_priv *priv = libipw_priv(dev);
10514 unsigned long flags;
10515 netdev_tx_t ret;
10516
10517 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10518 spin_lock_irqsave(&priv->lock, flags);
10519
10520 #ifdef CONFIG_IPW2200_PROMISCUOUS
10521 if (rtap_iface && netif_running(priv->prom_net_dev))
10522 ipw_handle_promiscuous_tx(priv, txb);
10523 #endif
10524
10525 ret = ipw_tx_skb(priv, txb, pri);
10526 if (ret == NETDEV_TX_OK)
10527 __ipw_led_activity_on(priv);
10528 spin_unlock_irqrestore(&priv->lock, flags);
10529
10530 return ret;
10531 }
10532
10533 static void ipw_net_set_multicast_list(struct net_device *dev)
10534 {
10535
10536 }
10537
10538 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10539 {
10540 struct ipw_priv *priv = libipw_priv(dev);
10541 struct sockaddr *addr = p;
10542
10543 if (!is_valid_ether_addr(addr->sa_data))
10544 return -EADDRNOTAVAIL;
10545 mutex_lock(&priv->mutex);
10546 priv->config |= CFG_CUSTOM_MAC;
10547 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10548 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10549 priv->net_dev->name, priv->mac_addr);
10550 schedule_work(&priv->adapter_restart);
10551 mutex_unlock(&priv->mutex);
10552 return 0;
10553 }
10554
10555 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10556 struct ethtool_drvinfo *info)
10557 {
10558 struct ipw_priv *p = libipw_priv(dev);
10559 char vers[64];
10560 char date[32];
10561 u32 len;
10562
10563 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10564 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10565
10566 len = sizeof(vers);
10567 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10568 len = sizeof(date);
10569 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10570
10571 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10572 vers, date);
10573 strlcpy(info->bus_info, pci_name(p->pci_dev),
10574 sizeof(info->bus_info));
10575 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10576 }
10577
10578 static u32 ipw_ethtool_get_link(struct net_device *dev)
10579 {
10580 struct ipw_priv *priv = libipw_priv(dev);
10581 return (priv->status & STATUS_ASSOCIATED) != 0;
10582 }
10583
10584 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10585 {
10586 return IPW_EEPROM_IMAGE_SIZE;
10587 }
10588
10589 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10590 struct ethtool_eeprom *eeprom, u8 * bytes)
10591 {
10592 struct ipw_priv *p = libipw_priv(dev);
10593
10594 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10595 return -EINVAL;
10596 mutex_lock(&p->mutex);
10597 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10598 mutex_unlock(&p->mutex);
10599 return 0;
10600 }
10601
10602 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10603 struct ethtool_eeprom *eeprom, u8 * bytes)
10604 {
10605 struct ipw_priv *p = libipw_priv(dev);
10606 int i;
10607
10608 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10609 return -EINVAL;
10610 mutex_lock(&p->mutex);
10611 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10612 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10613 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10614 mutex_unlock(&p->mutex);
10615 return 0;
10616 }
10617
10618 static const struct ethtool_ops ipw_ethtool_ops = {
10619 .get_link = ipw_ethtool_get_link,
10620 .get_drvinfo = ipw_ethtool_get_drvinfo,
10621 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10622 .get_eeprom = ipw_ethtool_get_eeprom,
10623 .set_eeprom = ipw_ethtool_set_eeprom,
10624 };
10625
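/* Primary interrupt handler: under priv->irq_lock, read and ack the pending
 * INTA bits, tell the device to stop raising interrupts, cache the value in
 * priv->isr_inta and defer the real work to the irq tasklet. */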
10626 static irqreturn_t ipw_isr(int irq, void *data)
10627 {
10628 struct ipw_priv *priv = data;
10629 u32 inta, inta_mask;
10630
10631 if (!priv)
10632 return IRQ_NONE;
10633
10634 spin_lock(&priv->irq_lock);
10635
10636 if (!(priv->status & STATUS_INT_ENABLED)) {
10637 /* IRQ is disabled */
10638 goto none;
10639 }
10640
10641 inta = ipw_read32(priv, IPW_INTA_RW);
10642 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10643
10644 if (inta == 0xFFFFFFFF) {
10645 /* Hardware disappeared */
10646 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10647 goto none;
10648 }
10649
10650 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10651 /* Shared interrupt */
10652 goto none;
10653 }
10654
10655 /* tell the device to stop sending interrupts */
10656 __ipw_disable_interrupts(priv);
10657
10658 /* ack current interrupts */
10659 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10660 ipw_write32(priv, IPW_INTA_RW, inta);
10661
10662 /* Cache INTA value for our tasklet */
10663 priv->isr_inta = inta;
10664
10665 tasklet_schedule(&priv->irq_tasklet);
10666
10667 spin_unlock(&priv->irq_lock);
10668
10669 return IRQ_HANDLED;
10670 none:
10671 spin_unlock(&priv->irq_lock);
10672 return IRQ_NONE;
10673 }
10674
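/* Poll the hardware RF kill switch: while it is still asserted, recheck every
 * two seconds; once it is released (and no software kill is pending) schedule
 * an adapter restart to bring the device back up. */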
10675 static void ipw_rf_kill(void *adapter)
10676 {
10677 struct ipw_priv *priv = adapter;
10678 unsigned long flags;
10679
10680 spin_lock_irqsave(&priv->lock, flags);
10681
10682 if (rf_kill_active(priv)) {
10683 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10684 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10685 goto exit_unlock;
10686 }
10687
10688 /* RF Kill is now disabled, so bring the device back up */
10689
10690 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10691 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10692 "device\n");
10693
10694 		/* we cannot do an adapter restart while inside an irq lock */
10695 schedule_work(&priv->adapter_restart);
10696 } else
10697 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10698 "enabled\n");
10699
10700 exit_unlock:
10701 spin_unlock_irqrestore(&priv->lock, flags);
10702 }
10703
10704 static void ipw_bg_rf_kill(struct work_struct *work)
10705 {
10706 struct ipw_priv *priv =
10707 container_of(work, struct ipw_priv, rf_kill.work);
10708 mutex_lock(&priv->mutex);
10709 ipw_rf_kill(priv);
10710 mutex_unlock(&priv->mutex);
10711 }
10712
10713 static void ipw_link_up(struct ipw_priv *priv)
10714 {
10715 priv->last_seq_num = -1;
10716 priv->last_frag_num = -1;
10717 priv->last_packet_time = 0;
10718
10719 netif_carrier_on(priv->net_dev);
10720
10721 cancel_delayed_work(&priv->request_scan);
10722 cancel_delayed_work(&priv->request_direct_scan);
10723 cancel_delayed_work(&priv->request_passive_scan);
10724 cancel_delayed_work(&priv->scan_event);
10725 ipw_reset_stats(priv);
10726 /* Ensure the rate is updated immediately */
10727 priv->last_rate = ipw_get_current_rate(priv);
10728 ipw_gather_stats(priv);
10729 ipw_led_link_up(priv);
10730 notify_wx_assoc_event(priv);
10731
10732 if (priv->config & CFG_BACKGROUND_SCAN)
10733 schedule_delayed_work(&priv->request_scan, HZ);
10734 }
10735
10736 static void ipw_bg_link_up(struct work_struct *work)
10737 {
10738 struct ipw_priv *priv =
10739 container_of(work, struct ipw_priv, link_up);
10740 mutex_lock(&priv->mutex);
10741 ipw_link_up(priv);
10742 mutex_unlock(&priv->mutex);
10743 }
10744
10745 static void ipw_link_down(struct ipw_priv *priv)
10746 {
10747 ipw_led_link_down(priv);
10748 netif_carrier_off(priv->net_dev);
10749 notify_wx_assoc_event(priv);
10750
10751 /* Cancel any queued work ... */
10752 cancel_delayed_work(&priv->request_scan);
10753 cancel_delayed_work(&priv->request_direct_scan);
10754 cancel_delayed_work(&priv->request_passive_scan);
10755 cancel_delayed_work(&priv->adhoc_check);
10756 cancel_delayed_work(&priv->gather_stats);
10757
10758 ipw_reset_stats(priv);
10759
10760 if (!(priv->status & STATUS_EXIT_PENDING)) {
10761 /* Queue up another scan... */
10762 schedule_delayed_work(&priv->request_scan, 0);
10763 } else
10764 cancel_delayed_work(&priv->scan_event);
10765 }
10766
10767 static void ipw_bg_link_down(struct work_struct *work)
10768 {
10769 struct ipw_priv *priv =
10770 container_of(work, struct ipw_priv, link_down);
10771 mutex_lock(&priv->mutex);
10772 ipw_link_down(priv);
10773 mutex_unlock(&priv->mutex);
10774 }
10775
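/* Initialize the wait queues, work items and the IRQ tasklet.  The ipw_bg_*
 * wrappers used here take priv->mutex around the corresponding foreground
 * routines. */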
10776 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10777 {
10778 int ret = 0;
10779
10780 init_waitqueue_head(&priv->wait_command_queue);
10781 init_waitqueue_head(&priv->wait_state);
10782
10783 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10784 INIT_WORK(&priv->associate, ipw_bg_associate);
10785 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10786 INIT_WORK(&priv->system_config, ipw_system_config);
10787 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10788 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10789 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10790 INIT_WORK(&priv->up, ipw_bg_up);
10791 INIT_WORK(&priv->down, ipw_bg_down);
10792 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10793 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10794 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10795 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10796 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10797 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10798 INIT_WORK(&priv->roam, ipw_bg_roam);
10799 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10800 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10801 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10802 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10803 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10804 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10805 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10806
10807 #ifdef CONFIG_IPW2200_QOS
10808 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10809 #endif /* CONFIG_IPW2200_QOS */
10810
10811 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10812 ipw_irq_tasklet, (unsigned long)priv);
10813
10814 return ret;
10815 }
10816
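/* libipw set_security callback: copy WEP keys, the active key index, auth
 * mode, privacy/encryption state and security level into priv->ieee->sec,
 * update the CAP_SHARED_KEY/CAP_PRIVACY_ON capabilities, flag
 * STATUS_SECURITY_UPDATED, and program hardware keys when host encryption is
 * not in use. */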
10817 static void shim__set_security(struct net_device *dev,
10818 struct libipw_security *sec)
10819 {
10820 struct ipw_priv *priv = libipw_priv(dev);
10821 int i;
10822 for (i = 0; i < 4; i++) {
10823 if (sec->flags & (1 << i)) {
10824 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10825 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10826 if (sec->key_sizes[i] == 0)
10827 priv->ieee->sec.flags &= ~(1 << i);
10828 else {
10829 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10830 sec->key_sizes[i]);
10831 priv->ieee->sec.flags |= (1 << i);
10832 }
10833 priv->status |= STATUS_SECURITY_UPDATED;
10834 } else if (sec->level != SEC_LEVEL_1)
10835 priv->ieee->sec.flags &= ~(1 << i);
10836 }
10837
10838 if (sec->flags & SEC_ACTIVE_KEY) {
10839 if (sec->active_key <= 3) {
10840 priv->ieee->sec.active_key = sec->active_key;
10841 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10842 } else
10843 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10844 priv->status |= STATUS_SECURITY_UPDATED;
10845 } else
10846 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10847
10848 if ((sec->flags & SEC_AUTH_MODE) &&
10849 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10850 priv->ieee->sec.auth_mode = sec->auth_mode;
10851 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10852 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10853 priv->capability |= CAP_SHARED_KEY;
10854 else
10855 priv->capability &= ~CAP_SHARED_KEY;
10856 priv->status |= STATUS_SECURITY_UPDATED;
10857 }
10858
10859 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10860 priv->ieee->sec.flags |= SEC_ENABLED;
10861 priv->ieee->sec.enabled = sec->enabled;
10862 priv->status |= STATUS_SECURITY_UPDATED;
10863 if (sec->enabled)
10864 priv->capability |= CAP_PRIVACY_ON;
10865 else
10866 priv->capability &= ~CAP_PRIVACY_ON;
10867 }
10868
10869 if (sec->flags & SEC_ENCRYPT)
10870 priv->ieee->sec.encrypt = sec->encrypt;
10871
10872 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10873 priv->ieee->sec.level = sec->level;
10874 priv->ieee->sec.flags |= SEC_LEVEL;
10875 priv->status |= STATUS_SECURITY_UPDATED;
10876 }
10877
10878 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10879 ipw_set_hwcrypto_keys(priv);
10880
10881 	/* To match the current functionality of ipw2100 (which works well with
10882 	 * various supplicants), we don't force a disassociation if the
10883 	 * privacy capability changes ... */
10884 #if 0
10885 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10886 (((priv->assoc_request.capability &
10887 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10888 (!(priv->assoc_request.capability &
10889 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10890 IPW_DEBUG_ASSOC("Disassociating due to capability "
10891 "change.\n");
10892 ipw_disassociate(priv);
10893 }
10894 #endif
10895 }
10896
10897 static int init_supported_rates(struct ipw_priv *priv,
10898 struct ipw_supported_rates *rates)
10899 {
10900 /* TODO: Mask out rates based on priv->rates_mask */
10901
10902 memset(rates, 0, sizeof(*rates));
10903 /* configure supported rates */
10904 switch (priv->ieee->freq_band) {
10905 case LIBIPW_52GHZ_BAND:
10906 rates->ieee_mode = IPW_A_MODE;
10907 rates->purpose = IPW_RATE_CAPABILITIES;
10908 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10909 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10910 break;
10911
10912 default: /* Mixed or 2.4Ghz */
10913 rates->ieee_mode = IPW_G_MODE;
10914 rates->purpose = IPW_RATE_CAPABILITIES;
10915 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10916 LIBIPW_CCK_DEFAULT_RATES_MASK);
10917 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10918 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10919 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10920 }
10921 break;
10922 }
10923
10924 return 0;
10925 }
10926
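/* Push the run-time configuration to freshly loaded firmware: Tx power,
 * adapter address, system config (Bluetooth coexistence, promiscuous filters,
 * broadcast-SSID probe answers), supported rates, RTS threshold, QoS and the
 * random seed, then send HOST_COMPLETE to move the firmware into the RUN
 * state and turn the radio LED on. */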
10927 static int ipw_config(struct ipw_priv *priv)
10928 {
10929 	/* This is only called from ipw_up, which resets/reloads the firmware,
10930 	   so we don't need to first disable the card before we
10931 	   configure it */
10932 if (ipw_set_tx_power(priv))
10933 goto error;
10934
10935 /* initialize adapter address */
10936 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10937 goto error;
10938
10939 /* set basic system config settings */
10940 init_sys_config(&priv->sys_config);
10941
10942 	/* Support Bluetooth if we have BT h/w on board and the user wants to.
10943 	 * BT priority is not supported yet (don't abort or defer our Tx) */
10944 if (bt_coexist) {
10945 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10946
10947 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10948 priv->sys_config.bt_coexistence
10949 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10950 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10951 priv->sys_config.bt_coexistence
10952 |= CFG_BT_COEXISTENCE_OOB;
10953 }
10954
10955 #ifdef CONFIG_IPW2200_PROMISCUOUS
10956 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10957 priv->sys_config.accept_all_data_frames = 1;
10958 priv->sys_config.accept_non_directed_frames = 1;
10959 priv->sys_config.accept_all_mgmt_bcpr = 1;
10960 priv->sys_config.accept_all_mgmt_frames = 1;
10961 }
10962 #endif
10963
10964 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10965 priv->sys_config.answer_broadcast_ssid_probe = 1;
10966 else
10967 priv->sys_config.answer_broadcast_ssid_probe = 0;
10968
10969 if (ipw_send_system_config(priv))
10970 goto error;
10971
10972 init_supported_rates(priv, &priv->rates);
10973 if (ipw_send_supported_rates(priv, &priv->rates))
10974 goto error;
10975
10976 /* Set request-to-send threshold */
10977 if (priv->rts_threshold) {
10978 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10979 goto error;
10980 }
10981 #ifdef CONFIG_IPW2200_QOS
10982 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10983 ipw_qos_activate(priv, NULL);
10984 #endif /* CONFIG_IPW2200_QOS */
10985
10986 if (ipw_set_random_seed(priv))
10987 goto error;
10988
10989 /* final state transition to the RUN state */
10990 if (ipw_send_host_complete(priv))
10991 goto error;
10992
10993 priv->status |= STATUS_INIT;
10994
10995 ipw_led_init(priv);
10996 ipw_led_radio_on(priv);
10997 priv->notif_missed_beacons = 0;
10998
10999 /* Set hardware WEP key if it is configured. */
11000 if ((priv->capability & CAP_PRIVACY_ON) &&
11001 (priv->ieee->sec.level == SEC_LEVEL_1) &&
11002 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
11003 ipw_set_hwcrypto_keys(priv);
11004
11005 return 0;
11006
11007 error:
11008 return -EIO;
11009 }
11010
11011 /*
11012 * NOTE:
11013 *
11014 * These tables have been tested in conjunction with the
11015 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11016 *
11017  * Altering these values, using them on other hardware, or using them in
11018  * geographies not intended for resale of the above-mentioned Intel
11019  * adapters has not been tested.
11020 *
11021 * Remember to update the table in README.ipw2200 when changing this
11022 * table.
11023 *
11024 */
11025 static const struct libipw_geo ipw_geos[] = {
11026 { /* Restricted */
11027 "---",
11028 .bg_channels = 11,
11029 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11030 {2427, 4}, {2432, 5}, {2437, 6},
11031 {2442, 7}, {2447, 8}, {2452, 9},
11032 {2457, 10}, {2462, 11}},
11033 },
11034
11035 { /* Custom US/Canada */
11036 "ZZF",
11037 .bg_channels = 11,
11038 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11039 {2427, 4}, {2432, 5}, {2437, 6},
11040 {2442, 7}, {2447, 8}, {2452, 9},
11041 {2457, 10}, {2462, 11}},
11042 .a_channels = 8,
11043 .a = {{5180, 36},
11044 {5200, 40},
11045 {5220, 44},
11046 {5240, 48},
11047 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11048 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11049 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11050 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11051 },
11052
11053 { /* Rest of World */
11054 "ZZD",
11055 .bg_channels = 13,
11056 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11057 {2427, 4}, {2432, 5}, {2437, 6},
11058 {2442, 7}, {2447, 8}, {2452, 9},
11059 {2457, 10}, {2462, 11}, {2467, 12},
11060 {2472, 13}},
11061 },
11062
11063 { /* Custom USA & Europe & High */
11064 "ZZA",
11065 .bg_channels = 11,
11066 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11067 {2427, 4}, {2432, 5}, {2437, 6},
11068 {2442, 7}, {2447, 8}, {2452, 9},
11069 {2457, 10}, {2462, 11}},
11070 .a_channels = 13,
11071 .a = {{5180, 36},
11072 {5200, 40},
11073 {5220, 44},
11074 {5240, 48},
11075 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11076 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11077 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11078 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11079 {5745, 149},
11080 {5765, 153},
11081 {5785, 157},
11082 {5805, 161},
11083 {5825, 165}},
11084 },
11085
11086 { /* Custom NA & Europe */
11087 "ZZB",
11088 .bg_channels = 11,
11089 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11090 {2427, 4}, {2432, 5}, {2437, 6},
11091 {2442, 7}, {2447, 8}, {2452, 9},
11092 {2457, 10}, {2462, 11}},
11093 .a_channels = 13,
11094 .a = {{5180, 36},
11095 {5200, 40},
11096 {5220, 44},
11097 {5240, 48},
11098 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11099 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11100 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11101 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11102 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11103 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11104 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11105 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11106 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11107 },
11108
11109 { /* Custom Japan */
11110 "ZZC",
11111 .bg_channels = 11,
11112 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11113 {2427, 4}, {2432, 5}, {2437, 6},
11114 {2442, 7}, {2447, 8}, {2452, 9},
11115 {2457, 10}, {2462, 11}},
11116 .a_channels = 4,
11117 .a = {{5170, 34}, {5190, 38},
11118 {5210, 42}, {5230, 46}},
11119 },
11120
11121 { /* Custom */
11122 "ZZM",
11123 .bg_channels = 11,
11124 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11125 {2427, 4}, {2432, 5}, {2437, 6},
11126 {2442, 7}, {2447, 8}, {2452, 9},
11127 {2457, 10}, {2462, 11}},
11128 },
11129
11130 { /* Europe */
11131 "ZZE",
11132 .bg_channels = 13,
11133 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11134 {2427, 4}, {2432, 5}, {2437, 6},
11135 {2442, 7}, {2447, 8}, {2452, 9},
11136 {2457, 10}, {2462, 11}, {2467, 12},
11137 {2472, 13}},
11138 .a_channels = 19,
11139 .a = {{5180, 36},
11140 {5200, 40},
11141 {5220, 44},
11142 {5240, 48},
11143 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11144 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11145 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11146 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11147 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11148 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11149 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11150 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11151 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11152 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11153 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11154 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11155 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11156 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11157 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11158 },
11159
11160 { /* Custom Japan */
11161 "ZZJ",
11162 .bg_channels = 14,
11163 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11164 {2427, 4}, {2432, 5}, {2437, 6},
11165 {2442, 7}, {2447, 8}, {2452, 9},
11166 {2457, 10}, {2462, 11}, {2467, 12},
11167 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11168 .a_channels = 4,
11169 .a = {{5170, 34}, {5190, 38},
11170 {5210, 42}, {5230, 46}},
11171 },
11172
11173 { /* Rest of World */
11174 "ZZR",
11175 .bg_channels = 14,
11176 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11177 {2427, 4}, {2432, 5}, {2437, 6},
11178 {2442, 7}, {2447, 8}, {2452, 9},
11179 {2457, 10}, {2462, 11}, {2467, 12},
11180 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11181 LIBIPW_CH_PASSIVE_ONLY}},
11182 },
11183
11184 { /* High Band */
11185 "ZZH",
11186 .bg_channels = 13,
11187 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11188 {2427, 4}, {2432, 5}, {2437, 6},
11189 {2442, 7}, {2447, 8}, {2452, 9},
11190 {2457, 10}, {2462, 11},
11191 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11192 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11193 .a_channels = 4,
11194 .a = {{5745, 149}, {5765, 153},
11195 {5785, 157}, {5805, 161}},
11196 },
11197
11198 { /* Custom Europe */
11199 "ZZG",
11200 .bg_channels = 13,
11201 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11202 {2427, 4}, {2432, 5}, {2437, 6},
11203 {2442, 7}, {2447, 8}, {2452, 9},
11204 {2457, 10}, {2462, 11},
11205 {2467, 12}, {2472, 13}},
11206 .a_channels = 4,
11207 .a = {{5180, 36}, {5200, 40},
11208 {5220, 44}, {5240, 48}},
11209 },
11210
11211 { /* Europe */
11212 "ZZK",
11213 .bg_channels = 13,
11214 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11215 {2427, 4}, {2432, 5}, {2437, 6},
11216 {2442, 7}, {2447, 8}, {2452, 9},
11217 {2457, 10}, {2462, 11},
11218 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11219 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11220 .a_channels = 24,
11221 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11222 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11223 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11224 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11225 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11226 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11227 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11228 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11229 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11230 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11231 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11232 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11233 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11234 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11235 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11236 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11237 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11238 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11239 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11240 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11241 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11242 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11243 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11244 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11245 },
11246
11247 { /* Europe */
11248 "ZZL",
11249 .bg_channels = 11,
11250 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11251 {2427, 4}, {2432, 5}, {2437, 6},
11252 {2442, 7}, {2447, 8}, {2452, 9},
11253 {2457, 10}, {2462, 11}},
11254 .a_channels = 13,
11255 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11256 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11257 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11258 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11259 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11260 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11261 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11262 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11263 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11264 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11265 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11266 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11267 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11268 }
11269 };
11270
11271 #define MAX_HW_RESTARTS 5
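/* Load the firmware and configure the device, retrying up to MAX_HW_RESTARTS
 * times.  Returns 0 when the device is up (or intentionally left down because
 * of RF kill or a geography failure) and a negative errno otherwise. */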
11272 static int ipw_up(struct ipw_priv *priv)
11273 {
11274 int rc, i, j;
11275
11276 /* Age scan list entries found before suspend */
11277 if (priv->suspend_time) {
11278 libipw_networks_age(priv->ieee, priv->suspend_time);
11279 priv->suspend_time = 0;
11280 }
11281
11282 if (priv->status & STATUS_EXIT_PENDING)
11283 return -EIO;
11284
11285 if (cmdlog && !priv->cmdlog) {
11286 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11287 GFP_KERNEL);
11288 if (priv->cmdlog == NULL) {
11289 IPW_ERROR("Error allocating %d command log entries.\n",
11290 cmdlog);
11291 return -ENOMEM;
11292 } else {
11293 priv->cmdlog_len = cmdlog;
11294 }
11295 }
11296
11297 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11298 /* Load the microcode, firmware, and eeprom.
11299 * Also start the clocks. */
11300 rc = ipw_load(priv);
11301 if (rc) {
11302 IPW_ERROR("Unable to load firmware: %d\n", rc);
11303 return rc;
11304 }
11305
11306 ipw_init_ordinals(priv);
11307 if (!(priv->config & CFG_CUSTOM_MAC))
11308 eeprom_parse_mac(priv, priv->mac_addr);
11309 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11310 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11311
11312 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11313 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11314 ipw_geos[j].name, 3))
11315 break;
11316 }
11317 if (j == ARRAY_SIZE(ipw_geos)) {
11318 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11319 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11320 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11321 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11322 j = 0;
11323 }
11324 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11325 			IPW_WARNING("Could not set geography.\n");
11326 return 0;
11327 }
11328
11329 if (priv->status & STATUS_RF_KILL_SW) {
11330 IPW_WARNING("Radio disabled by module parameter.\n");
11331 return 0;
11332 } else if (rf_kill_active(priv)) {
11333 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11334 "Kill switch must be turned off for "
11335 "wireless networking to work.\n");
11336 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11337 return 0;
11338 }
11339
11340 rc = ipw_config(priv);
11341 if (!rc) {
11342 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11343
11344 			/* If configured to try to auto-associate, kick
11345 			 * off a scan. */
11346 schedule_delayed_work(&priv->request_scan, 0);
11347
11348 return 0;
11349 }
11350
11351 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11352 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11353 i, MAX_HW_RESTARTS);
11354
11355 /* We had an error bringing up the hardware, so take it
11356 * all the way back down so we can try again */
11357 ipw_down(priv);
11358 }
11359
11360 	/* We tried to restart and configure the device for as long as our
11361 	 * patience could withstand */
11362 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11363
11364 return -EIO;
11365 }
11366
11367 static void ipw_bg_up(struct work_struct *work)
11368 {
11369 struct ipw_priv *priv =
11370 container_of(work, struct ipw_priv, up);
11371 mutex_lock(&priv->mutex);
11372 ipw_up(priv);
11373 mutex_unlock(&priv->mutex);
11374 }
11375
11376 static void ipw_deinit(struct ipw_priv *priv)
11377 {
11378 int i;
11379
11380 if (priv->status & STATUS_SCANNING) {
11381 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11382 ipw_abort_scan(priv);
11383 }
11384
11385 if (priv->status & STATUS_ASSOCIATED) {
11386 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11387 ipw_disassociate(priv);
11388 }
11389
11390 ipw_led_shutdown(priv);
11391
11392 	/* Wait up to 1s for the status to change to not scanning and not
11393 	 * associated (disassociation can take a while for a full 802.11
11394 	 * exchange). */
11395 for (i = 1000; i && (priv->status &
11396 (STATUS_DISASSOCIATING |
11397 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11398 udelay(10);
11399
11400 if (priv->status & (STATUS_DISASSOCIATING |
11401 STATUS_ASSOCIATED | STATUS_SCANNING))
11402 IPW_DEBUG_INFO("Still associated or scanning...\n");
11403 else
11404 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11405
11406 /* Attempt to disable the card */
11407 ipw_send_card_disable(priv, 0);
11408
11409 priv->status &= ~STATUS_INIT;
11410 }
11411
11412 static void ipw_down(struct ipw_priv *priv)
11413 {
11414 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11415
11416 priv->status |= STATUS_EXIT_PENDING;
11417
11418 if (ipw_is_init(priv))
11419 ipw_deinit(priv);
11420
11421 /* Wipe out the EXIT_PENDING status bit if we are not actually
11422 * exiting the module */
11423 if (!exit_pending)
11424 priv->status &= ~STATUS_EXIT_PENDING;
11425
11426 /* tell the device to stop sending interrupts */
11427 ipw_disable_interrupts(priv);
11428
11429 /* Clear all bits but the RF Kill */
11430 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11431 netif_carrier_off(priv->net_dev);
11432
11433 ipw_stop_nic(priv);
11434
11435 ipw_led_radio_off(priv);
11436 }
11437
11438 static void ipw_bg_down(struct work_struct *work)
11439 {
11440 struct ipw_priv *priv =
11441 container_of(work, struct ipw_priv, down);
11442 mutex_lock(&priv->mutex);
11443 ipw_down(priv);
11444 mutex_unlock(&priv->mutex);
11445 }
11446
11447 /* Called by register_netdev() */
11448 static int ipw_net_init(struct net_device *dev)
11449 {
11450 int rc = 0;
11451 struct ipw_priv *priv = libipw_priv(dev);
11452
11453 mutex_lock(&priv->mutex);
11454 if (ipw_up(priv))
11455 rc = -EIO;
11456 mutex_unlock(&priv->mutex);
11457
11458 return rc;
11459 }
11460
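/* Translate the active libipw geography into cfg80211 band/channel tables and
 * register the wiphy with the stack. */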
11461 static int ipw_wdev_init(struct net_device *dev)
11462 {
11463 int i, rc = 0;
11464 struct ipw_priv *priv = libipw_priv(dev);
11465 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11466 struct wireless_dev *wdev = &priv->ieee->wdev;
11467
11468 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11469
11470 /* fill-out priv->ieee->bg_band */
11471 if (geo->bg_channels) {
11472 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11473
11474 bg_band->band = IEEE80211_BAND_2GHZ;
11475 bg_band->n_channels = geo->bg_channels;
11476 bg_band->channels = kcalloc(geo->bg_channels,
11477 sizeof(struct ieee80211_channel),
11478 GFP_KERNEL);
11479 if (!bg_band->channels) {
11480 rc = -ENOMEM;
11481 goto out;
11482 }
11483 /* translate geo->bg to bg_band.channels */
11484 for (i = 0; i < geo->bg_channels; i++) {
11485 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11486 bg_band->channels[i].center_freq = geo->bg[i].freq;
11487 bg_band->channels[i].hw_value = geo->bg[i].channel;
11488 bg_band->channels[i].max_power = geo->bg[i].max_power;
11489 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11490 bg_band->channels[i].flags |=
11491 IEEE80211_CHAN_PASSIVE_SCAN;
11492 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11493 bg_band->channels[i].flags |=
11494 IEEE80211_CHAN_NO_IBSS;
11495 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11496 bg_band->channels[i].flags |=
11497 IEEE80211_CHAN_RADAR;
11498 /* No equivalent for LIBIPW_CH_80211H_RULES,
11499 LIBIPW_CH_UNIFORM_SPREADING, or
11500 LIBIPW_CH_B_ONLY... */
11501 }
11502 /* point at bitrate info */
11503 bg_band->bitrates = ipw2200_bg_rates;
11504 bg_band->n_bitrates = ipw2200_num_bg_rates;
11505
11506 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11507 }
11508
11509 /* fill-out priv->ieee->a_band */
11510 if (geo->a_channels) {
11511 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11512
11513 a_band->band = IEEE80211_BAND_5GHZ;
11514 a_band->n_channels = geo->a_channels;
11515 a_band->channels = kcalloc(geo->a_channels,
11516 sizeof(struct ieee80211_channel),
11517 GFP_KERNEL);
11518 if (!a_band->channels) {
11519 rc = -ENOMEM;
11520 goto out;
11521 }
11522 /* translate geo->a to a_band.channels */
11523 for (i = 0; i < geo->a_channels; i++) {
11524 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11525 a_band->channels[i].center_freq = geo->a[i].freq;
11526 a_band->channels[i].hw_value = geo->a[i].channel;
11527 a_band->channels[i].max_power = geo->a[i].max_power;
11528 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11529 a_band->channels[i].flags |=
11530 IEEE80211_CHAN_PASSIVE_SCAN;
11531 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11532 a_band->channels[i].flags |=
11533 IEEE80211_CHAN_NO_IBSS;
11534 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11535 a_band->channels[i].flags |=
11536 IEEE80211_CHAN_RADAR;
11537 /* No equivalent for LIBIPW_CH_80211H_RULES,
11538 LIBIPW_CH_UNIFORM_SPREADING, or
11539 LIBIPW_CH_B_ONLY... */
11540 }
11541 /* point at bitrate info */
11542 a_band->bitrates = ipw2200_a_rates;
11543 a_band->n_bitrates = ipw2200_num_a_rates;
11544
11545 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11546 }
11547
11548 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11549 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11550
11551 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11552
11553 /* With that information in place, we can now register the wiphy... */
11554 if (wiphy_register(wdev->wiphy))
11555 rc = -EIO;
11556 out:
11557 return rc;
11558 }
11559
11560 /* PCI driver stuff */
11561 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11562 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11563 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11564 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11565 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11566 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11567 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11568 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11569 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11570 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11571 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11572 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11573 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11574 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11575 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11576 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11577 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11578 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11579 {PCI_VDEVICE(INTEL, 0x104f), 0},
11580 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11581 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11582 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11583 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11584
11585 /* required last entry */
11586 {0,}
11587 };
11588
11589 MODULE_DEVICE_TABLE(pci, card_ids);
11590
11591 static struct attribute *ipw_sysfs_entries[] = {
11592 &dev_attr_rf_kill.attr,
11593 &dev_attr_direct_dword.attr,
11594 &dev_attr_indirect_byte.attr,
11595 &dev_attr_indirect_dword.attr,
11596 &dev_attr_mem_gpio_reg.attr,
11597 &dev_attr_command_event_reg.attr,
11598 &dev_attr_nic_type.attr,
11599 &dev_attr_status.attr,
11600 &dev_attr_cfg.attr,
11601 &dev_attr_error.attr,
11602 &dev_attr_event_log.attr,
11603 &dev_attr_cmd_log.attr,
11604 &dev_attr_eeprom_delay.attr,
11605 &dev_attr_ucode_version.attr,
11606 &dev_attr_rtc.attr,
11607 &dev_attr_scan_age.attr,
11608 &dev_attr_led.attr,
11609 &dev_attr_speed_scan.attr,
11610 &dev_attr_net_stats.attr,
11611 &dev_attr_channels.attr,
11612 #ifdef CONFIG_IPW2200_PROMISCUOUS
11613 &dev_attr_rtap_iface.attr,
11614 &dev_attr_rtap_filter.attr,
11615 #endif
11616 NULL
11617 };
11618
11619 static struct attribute_group ipw_attribute_group = {
11620 .name = NULL, /* put in device directory */
11621 .attrs = ipw_sysfs_entries,
11622 };
11623
11624 #ifdef CONFIG_IPW2200_PROMISCUOUS
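/* Optional "rtap%d" companion interface: a radiotap monitor device that sees
 * copies of received and transmitted frames while promiscuous support is
 * enabled. */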
11625 static int ipw_prom_open(struct net_device *dev)
11626 {
11627 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11628 struct ipw_priv *priv = prom_priv->priv;
11629
11630 IPW_DEBUG_INFO("prom dev->open\n");
11631 netif_carrier_off(dev);
11632
11633 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11634 priv->sys_config.accept_all_data_frames = 1;
11635 priv->sys_config.accept_non_directed_frames = 1;
11636 priv->sys_config.accept_all_mgmt_bcpr = 1;
11637 priv->sys_config.accept_all_mgmt_frames = 1;
11638
11639 ipw_send_system_config(priv);
11640 }
11641
11642 return 0;
11643 }
11644
11645 static int ipw_prom_stop(struct net_device *dev)
11646 {
11647 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11648 struct ipw_priv *priv = prom_priv->priv;
11649
11650 IPW_DEBUG_INFO("prom dev->stop\n");
11651
11652 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11653 priv->sys_config.accept_all_data_frames = 0;
11654 priv->sys_config.accept_non_directed_frames = 0;
11655 priv->sys_config.accept_all_mgmt_bcpr = 0;
11656 priv->sys_config.accept_all_mgmt_frames = 0;
11657
11658 ipw_send_system_config(priv);
11659 }
11660
11661 return 0;
11662 }
11663
11664 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11665 struct net_device *dev)
11666 {
11667 IPW_DEBUG_INFO("prom dev->xmit\n");
11668 dev_kfree_skb(skb);
11669 return NETDEV_TX_OK;
11670 }
11671
11672 static const struct net_device_ops ipw_prom_netdev_ops = {
11673 .ndo_open = ipw_prom_open,
11674 .ndo_stop = ipw_prom_stop,
11675 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11676 .ndo_change_mtu = libipw_change_mtu,
11677 .ndo_set_mac_address = eth_mac_addr,
11678 .ndo_validate_addr = eth_validate_addr,
11679 };
11680
11681 static int ipw_prom_alloc(struct ipw_priv *priv)
11682 {
11683 int rc = 0;
11684
11685 if (priv->prom_net_dev)
11686 return -EPERM;
11687
11688 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11689 if (priv->prom_net_dev == NULL)
11690 return -ENOMEM;
11691
11692 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11693 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11694 priv->prom_priv->priv = priv;
11695
11696 strcpy(priv->prom_net_dev->name, "rtap%d");
11697 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11698
11699 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11700 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11701
11702 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11703 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11704
11705 rc = register_netdev(priv->prom_net_dev);
11706 if (rc) {
11707 free_libipw(priv->prom_net_dev, 1);
11708 priv->prom_net_dev = NULL;
11709 return rc;
11710 }
11711
11712 return 0;
11713 }
11714
11715 static void ipw_prom_free(struct ipw_priv *priv)
11716 {
11717 if (!priv->prom_net_dev)
11718 return;
11719
11720 unregister_netdev(priv->prom_net_dev);
11721 free_libipw(priv->prom_net_dev, 1);
11722
11723 priv->prom_net_dev = NULL;
11724 }
11725
11726 #endif
11727
11728 static const struct net_device_ops ipw_netdev_ops = {
11729 .ndo_init = ipw_net_init,
11730 .ndo_open = ipw_net_open,
11731 .ndo_stop = ipw_net_stop,
11732 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11733 .ndo_set_mac_address = ipw_net_set_mac_address,
11734 .ndo_start_xmit = libipw_xmit,
11735 .ndo_change_mtu = libipw_change_mtu,
11736 .ndo_validate_addr = eth_validate_addr,
11737 };
11738
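/* PCI probe: map BAR 0, set up deferred work and the shared interrupt, hook
 * the libipw callbacks, register the net_device, sysfs attributes and wiphy,
 * and optionally create the rtap interface. */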
11739 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11740 const struct pci_device_id *ent)
11741 {
11742 int err = 0;
11743 struct net_device *net_dev;
11744 void __iomem *base;
11745 u32 length, val;
11746 struct ipw_priv *priv;
11747 int i;
11748
11749 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11750 if (net_dev == NULL) {
11751 err = -ENOMEM;
11752 goto out;
11753 }
11754
11755 priv = libipw_priv(net_dev);
11756 priv->ieee = netdev_priv(net_dev);
11757
11758 priv->net_dev = net_dev;
11759 priv->pci_dev = pdev;
11760 ipw_debug_level = debug;
11761 spin_lock_init(&priv->irq_lock);
11762 spin_lock_init(&priv->lock);
11763 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11764 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11765
11766 mutex_init(&priv->mutex);
11767 if (pci_enable_device(pdev)) {
11768 err = -ENODEV;
11769 goto out_free_libipw;
11770 }
11771
11772 pci_set_master(pdev);
11773
11774 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11775 if (!err)
11776 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11777 if (err) {
11778 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11779 goto out_pci_disable_device;
11780 }
11781
11782 pci_set_drvdata(pdev, priv);
11783
11784 err = pci_request_regions(pdev, DRV_NAME);
11785 if (err)
11786 goto out_pci_disable_device;
11787
11788 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11789 * PCI Tx retries from interfering with C3 CPU state */
11790 pci_read_config_dword(pdev, 0x40, &val);
11791 if ((val & 0x0000ff00) != 0)
11792 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11793
11794 length = pci_resource_len(pdev, 0);
11795 priv->hw_len = length;
11796
11797 base = pci_ioremap_bar(pdev, 0);
11798 if (!base) {
11799 err = -ENODEV;
11800 goto out_pci_release_regions;
11801 }
11802
11803 priv->hw_base = base;
11804 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11805 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11806
11807 err = ipw_setup_deferred_work(priv);
11808 if (err) {
11809 IPW_ERROR("Unable to setup deferred work\n");
11810 goto out_iounmap;
11811 }
11812
11813 ipw_sw_reset(priv, 1);
11814
11815 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11816 if (err) {
11817 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11818 goto out_iounmap;
11819 }
11820
11821 SET_NETDEV_DEV(net_dev, &pdev->dev);
11822
11823 mutex_lock(&priv->mutex);
11824
11825 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11826 priv->ieee->set_security = shim__set_security;
11827 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11828
11829 #ifdef CONFIG_IPW2200_QOS
11830 priv->ieee->is_qos_active = ipw_is_qos_active;
11831 priv->ieee->handle_probe_response = ipw_handle_beacon;
11832 priv->ieee->handle_beacon = ipw_handle_probe_response;
11833 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11834 #endif /* CONFIG_IPW2200_QOS */
11835
11836 priv->ieee->perfect_rssi = -20;
11837 priv->ieee->worst_rssi = -85;
11838
11839 net_dev->netdev_ops = &ipw_netdev_ops;
11840 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11841 net_dev->wireless_data = &priv->wireless_data;
11842 net_dev->wireless_handlers = &ipw_wx_handler_def;
11843 net_dev->ethtool_ops = &ipw_ethtool_ops;
11844 net_dev->irq = pdev->irq;
11845 net_dev->base_addr = (unsigned long)priv->hw_base;
11846 net_dev->mem_start = pci_resource_start(pdev, 0);
11847 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11848
11849 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11850 if (err) {
11851 IPW_ERROR("failed to create sysfs device attributes\n");
11852 mutex_unlock(&priv->mutex);
11853 goto out_release_irq;
11854 }
11855
11856 mutex_unlock(&priv->mutex);
11857 err = register_netdev(net_dev);
11858 if (err) {
11859 IPW_ERROR("failed to register network device\n");
11860 goto out_remove_sysfs;
11861 }
11862
11863 err = ipw_wdev_init(net_dev);
11864 if (err) {
11865 IPW_ERROR("failed to register wireless device\n");
11866 goto out_unregister_netdev;
11867 }
11868
11869 #ifdef CONFIG_IPW2200_PROMISCUOUS
11870 if (rtap_iface) {
11871 err = ipw_prom_alloc(priv);
11872 if (err) {
11873 IPW_ERROR("Failed to register promiscuous network "
11874 "device (error %d).\n", err);
11875 wiphy_unregister(priv->ieee->wdev.wiphy);
11876 kfree(priv->ieee->a_band.channels);
11877 kfree(priv->ieee->bg_band.channels);
11878 goto out_unregister_netdev;
11879 }
11880 }
11881 #endif
11882
11883 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11884 "channels, %d 802.11a channels)\n",
11885 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11886 priv->ieee->geo.a_channels);
11887
11888 return 0;
11889
11890 out_unregister_netdev:
11891 unregister_netdev(priv->net_dev);
11892 out_remove_sysfs:
11893 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11894 out_release_irq:
11895 free_irq(pdev->irq, priv);
11896 out_iounmap:
11897 iounmap(priv->hw_base);
11898 out_pci_release_regions:
11899 pci_release_regions(pdev);
11900 out_pci_disable_device:
11901 pci_disable_device(pdev);
11902 pci_set_drvdata(pdev, NULL);
11903 out_free_libipw:
11904 free_libipw(priv->net_dev, 0);
11905 out:
11906 return err;
11907 }
11908
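/* PCI remove: take the device down, unregister the net_device, flush every
 * work item, free the rx/tx queues and the IBSS MAC hash lists, and release
 * the PCI resources; the wiphy must be unregistered before free_libipw(). */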
11909 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11910 {
11911 struct ipw_priv *priv = pci_get_drvdata(pdev);
11912 struct list_head *p, *q;
11913 int i;
11914
11915 if (!priv)
11916 return;
11917
11918 mutex_lock(&priv->mutex);
11919
11920 priv->status |= STATUS_EXIT_PENDING;
11921 ipw_down(priv);
11922 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11923
11924 mutex_unlock(&priv->mutex);
11925
11926 unregister_netdev(priv->net_dev);
11927
11928 if (priv->rxq) {
11929 ipw_rx_queue_free(priv, priv->rxq);
11930 priv->rxq = NULL;
11931 }
11932 ipw_tx_queue_free(priv);
11933
11934 if (priv->cmdlog) {
11935 kfree(priv->cmdlog);
11936 priv->cmdlog = NULL;
11937 }
11938
11939 	/* make sure all work items are inactive */
11940 cancel_delayed_work_sync(&priv->adhoc_check);
11941 cancel_work_sync(&priv->associate);
11942 cancel_work_sync(&priv->disassociate);
11943 cancel_work_sync(&priv->system_config);
11944 cancel_work_sync(&priv->rx_replenish);
11945 cancel_work_sync(&priv->adapter_restart);
11946 cancel_delayed_work_sync(&priv->rf_kill);
11947 cancel_work_sync(&priv->up);
11948 cancel_work_sync(&priv->down);
11949 cancel_delayed_work_sync(&priv->request_scan);
11950 cancel_delayed_work_sync(&priv->request_direct_scan);
11951 cancel_delayed_work_sync(&priv->request_passive_scan);
11952 cancel_delayed_work_sync(&priv->scan_event);
11953 cancel_delayed_work_sync(&priv->gather_stats);
11954 cancel_work_sync(&priv->abort_scan);
11955 cancel_work_sync(&priv->roam);
11956 cancel_delayed_work_sync(&priv->scan_check);
11957 cancel_work_sync(&priv->link_up);
11958 cancel_work_sync(&priv->link_down);
11959 cancel_delayed_work_sync(&priv->led_link_on);
11960 cancel_delayed_work_sync(&priv->led_link_off);
11961 cancel_delayed_work_sync(&priv->led_act_off);
11962 cancel_work_sync(&priv->merge_networks);
11963
11964 /* Free MAC hash list for ADHOC */
11965 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11966 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11967 list_del(p);
11968 kfree(list_entry(p, struct ipw_ibss_seq, list));
11969 }
11970 }
11971
11972 kfree(priv->error);
11973 priv->error = NULL;
11974
11975 #ifdef CONFIG_IPW2200_PROMISCUOUS
11976 ipw_prom_free(priv);
11977 #endif
11978
11979 free_irq(pdev->irq, priv);
11980 iounmap(priv->hw_base);
11981 pci_release_regions(pdev);
11982 pci_disable_device(pdev);
11983 pci_set_drvdata(pdev, NULL);
11984 /* wiphy_unregister needs to be here, before free_libipw */
11985 wiphy_unregister(priv->ieee->wdev.wiphy);
11986 kfree(priv->ieee->a_band.channels);
11987 kfree(priv->ieee->bg_band.channels);
11988 free_libipw(priv->net_dev, 0);
11989 free_firmware();
11990 }
11991
11992 #ifdef CONFIG_PM
11993 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11994 {
11995 struct ipw_priv *priv = pci_get_drvdata(pdev);
11996 struct net_device *dev = priv->net_dev;
11997
11998 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11999
12000 	/* Take down the device; this powers it off, etc. */
12001 ipw_down(priv);
12002
12003 /* Remove the PRESENT state of the device */
12004 netif_device_detach(dev);
12005
12006 pci_save_state(pdev);
12007 pci_disable_device(pdev);
12008 pci_set_power_state(pdev, pci_choose_state(pdev, state));
12009
12010 priv->suspend_at = get_seconds();
12011
12012 return 0;
12013 }
12014
12015 static int ipw_pci_resume(struct pci_dev *pdev)
12016 {
12017 struct ipw_priv *priv = pci_get_drvdata(pdev);
12018 struct net_device *dev = priv->net_dev;
12019 int err;
12020 u32 val;
12021
12022 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
12023
12024 pci_set_power_state(pdev, PCI_D0);
12025 err = pci_enable_device(pdev);
12026 if (err) {
12027 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
12028 dev->name);
12029 return err;
12030 }
12031 pci_restore_state(pdev);
12032
12033 /*
12034 * Suspend/Resume resets the PCI configuration space, so we have to
12035 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12036 * from interfering with C3 CPU state. pci_restore_state won't help
12037 * here since it only restores the first 64 bytes pci config header.
12038 */
12039 pci_read_config_dword(pdev, 0x40, &val);
12040 if ((val & 0x0000ff00) != 0)
12041 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12042
12043 	/* Set the device back into the PRESENT state; this will also wake
12044 	 * the queue if needed */
12045 netif_device_attach(dev);
12046
12047 priv->suspend_time = get_seconds() - priv->suspend_at;
12048
12049 /* Bring the device back up */
12050 schedule_work(&priv->up);
12051
12052 return 0;
12053 }
12054 #endif
12055
12056 static void ipw_pci_shutdown(struct pci_dev *pdev)
12057 {
12058 struct ipw_priv *priv = pci_get_drvdata(pdev);
12059
12060 	/* Take down the device; this powers it off, etc. */
12061 ipw_down(priv);
12062
12063 pci_disable_device(pdev);
12064 }
12065
12066 /* driver initialization stuff */
12067 static struct pci_driver ipw_driver = {
12068 .name = DRV_NAME,
12069 .id_table = card_ids,
12070 .probe = ipw_pci_probe,
12071 .remove = __devexit_p(ipw_pci_remove),
12072 #ifdef CONFIG_PM
12073 .suspend = ipw_pci_suspend,
12074 .resume = ipw_pci_resume,
12075 #endif
12076 .shutdown = ipw_pci_shutdown,
12077 };
12078
12079 static int __init ipw_init(void)
12080 {
12081 int ret;
12082
12083 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12084 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12085
12086 ret = pci_register_driver(&ipw_driver);
12087 if (ret) {
12088 IPW_ERROR("Unable to initialize PCI module\n");
12089 return ret;
12090 }
12091
12092 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12093 if (ret) {
12094 IPW_ERROR("Unable to create driver sysfs file\n");
12095 pci_unregister_driver(&ipw_driver);
12096 return ret;
12097 }
12098
12099 return ret;
12100 }
12101
12102 static void __exit ipw_exit(void)
12103 {
12104 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12105 pci_unregister_driver(&ipw_driver);
12106 }
12107
12108 module_param(disable, int, 0444);
12109 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12110
12111 module_param(associate, int, 0444);
12112 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12113
12114 module_param(auto_create, int, 0444);
12115 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12116
12117 module_param_named(led, led_support, int, 0444);
12118 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12119
12120 module_param(debug, int, 0444);
12121 MODULE_PARM_DESC(debug, "debug output mask");
12122
12123 module_param_named(channel, default_channel, int, 0444);
12124 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12125
12126 #ifdef CONFIG_IPW2200_PROMISCUOUS
12127 module_param(rtap_iface, int, 0444);
12128 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12129 #endif
12130
12131 #ifdef CONFIG_IPW2200_QOS
12132 module_param(qos_enable, int, 0444);
12133 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12134
12135 module_param(qos_burst_enable, int, 0444);
12136 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12137
12138 module_param(qos_no_ack_mask, int, 0444);
12139 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12140
12141 module_param(burst_duration_CCK, int, 0444);
12142 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12143
12144 module_param(burst_duration_OFDM, int, 0444);
12145 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12146 #endif /* CONFIG_IPW2200_QOS */
12147
12148 #ifdef CONFIG_IPW2200_MONITOR
12149 module_param_named(mode, network_mode, int, 0444);
12150 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12151 #else
12152 module_param_named(mode, network_mode, int, 0444);
12153 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12154 #endif
12155
12156 module_param(bt_coexist, int, 0444);
12157 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12158
12159 module_param(hwcrypto, int, 0444);
12160 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12161
12162 module_param(cmdlog, int, 0444);
12163 MODULE_PARM_DESC(cmdlog,
12164 "allocate a ring buffer for logging firmware commands");
12165
12166 module_param(roaming, int, 0444);
12167 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12168
12169 module_param(antenna, int, 0444);
12170 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the antenna with lower background noise), 3=Aux");
12171
12172 module_exit(ipw_exit);
12173 module_init(ipw_init);
12174