1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
49
50 static bool modparam_nohwcrypt;
51 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53
54 int modparam_noht;
55 module_param_named(noht, modparam_noht, int, 0444);
56 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57
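/*
 * Note: hw_value packs the hardware rate code into the low nibble;
 * bits 4-7 carry what appears to be a per-rate tx-power/index field
 * (_txpidx below).
 */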
58 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate) | (_txpidx) << 4, \
62 }
63
64 struct ieee80211_rate __carl9170_ratetable[] = {
65 RATE(10, 0, 0, 0),
66 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(60, 0xb, 0, 0),
70 RATE(90, 0xf, 0, 0),
71 RATE(120, 0xa, 0, 0),
72 RATE(180, 0xe, 0, 0),
73 RATE(240, 0x9, 0, 0),
74 RATE(360, 0xd, 1, 0),
75 RATE(480, 0x8, 2, 0),
76 RATE(540, 0xc, 3, 0),
77 };
78 #undef RATE
79
80 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
81 #define carl9170_g_ratetable_size 12
82 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
83 #define carl9170_a_ratetable_size 8
84
85 /*
86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87 * array in phy.c so that we don't have to do frequency lookups!
88 */
89 #define CHAN(_freq, _idx) { \
90 .center_freq = (_freq), \
91 .hw_value = (_idx), \
92 .max_power = 18, /* XXX */ \
93 }
94
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 CHAN(2412, 0),
97 CHAN(2417, 1),
98 CHAN(2422, 2),
99 CHAN(2427, 3),
100 CHAN(2432, 4),
101 CHAN(2437, 5),
102 CHAN(2442, 6),
103 CHAN(2447, 7),
104 CHAN(2452, 8),
105 CHAN(2457, 9),
106 CHAN(2462, 10),
107 CHAN(2467, 11),
108 CHAN(2472, 12),
109 CHAN(2484, 13),
110 };
111
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 CHAN(4920, 14),
114 CHAN(4940, 15),
115 CHAN(4960, 16),
116 CHAN(4980, 17),
117 CHAN(5040, 18),
118 CHAN(5060, 19),
119 CHAN(5080, 20),
120 CHAN(5180, 21),
121 CHAN(5200, 22),
122 CHAN(5220, 23),
123 CHAN(5240, 24),
124 CHAN(5260, 25),
125 CHAN(5280, 26),
126 CHAN(5300, 27),
127 CHAN(5320, 28),
128 CHAN(5500, 29),
129 CHAN(5520, 30),
130 CHAN(5540, 31),
131 CHAN(5560, 32),
132 CHAN(5580, 33),
133 CHAN(5600, 34),
134 CHAN(5620, 35),
135 CHAN(5640, 36),
136 CHAN(5660, 37),
137 CHAN(5680, 38),
138 CHAN(5700, 39),
139 CHAN(5745, 40),
140 CHAN(5765, 41),
141 CHAN(5785, 42),
142 CHAN(5805, 43),
143 CHAN(5825, 44),
144 CHAN(5170, 45),
145 CHAN(5190, 46),
146 CHAN(5210, 47),
147 CHAN(5230, 48),
148 };
149 #undef CHAN
150
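/*
 * The rx_mask below advertises MCS 0-15 (two spatial streams) plus
 * MCS 32; the rx_highest of 300 Mbit/s matches 2x2 operation on a
 * 40 MHz channel with a short guard interval.
 */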
151 #define CARL9170_HT_CAP \
152 { \
153 .ht_supported = true, \
154 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
155 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
156 IEEE80211_HT_CAP_SGI_40 | \
157 IEEE80211_HT_CAP_DSSSCCK40 | \
158 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
160 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
161 .mcs = { \
162 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
163 .rx_highest = cpu_to_le16(300), \
164 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
165 }, \
166 }
167
168 static struct ieee80211_supported_band carl9170_band_2GHz = {
169 .channels = carl9170_2ghz_chantable,
170 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
171 .bitrates = carl9170_g_ratetable,
172 .n_bitrates = carl9170_g_ratetable_size,
173 .ht_cap = CARL9170_HT_CAP,
174 };
175
176 static struct ieee80211_supported_band carl9170_band_5GHz = {
177 .channels = carl9170_5ghz_chantable,
178 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
179 .bitrates = carl9170_a_ratetable,
180 .n_bitrates = carl9170_a_ratetable_size,
181 .ht_cap = CARL9170_HT_CAP,
182 };
183
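/*
 * Reap TIDs that were marked SHUTDOWN: unlink them from the ampdu
 * list under the list lock, wait for an RCU grace period, and then
 * drop any frames still sitting in their queues.
 */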
static void carl9170_ampdu_gc(struct ar9170 *ar)
185 {
186 struct carl9170_sta_tid *tid_info;
187 LIST_HEAD(tid_gc);
188
189 rcu_read_lock();
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 spin_lock_bh(&ar->tx_ampdu_list_lock);
192 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 tid_info->state = CARL9170_TID_STATE_KILLED;
194 list_del_rcu(&tid_info->list);
195 ar->tx_ampdu_list_len--;
196 list_add_tail(&tid_info->tmp_list, &tid_gc);
197 }
198 spin_unlock_bh(&ar->tx_ampdu_list_lock);
199
200 }
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 rcu_read_unlock();
203
204 synchronize_rcu();
205
206 while (!list_empty(&tid_gc)) {
207 struct sk_buff *skb;
208 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 tmp_list);
210
211 while ((skb = __skb_dequeue(&tid_info->queue)))
212 carl9170_tx_status(ar, skb, false);
213
214 list_del_init(&tid_info->tmp_list);
215 kfree(tid_info);
216 }
217 }
218
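/*
 * Drain the TX path. If drop_queued is set, frames still waiting in
 * tx_pending[] are completed as failed right away; afterwards, wait
 * (up to one second) for frames already handed to the device.
 */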
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220 {
221 if (drop_queued) {
222 int i;
223
224 /*
225 * We can only drop frames which have not been uploaded
226 * to the device yet.
227 */
228
229 for (i = 0; i < ar->hw->queues; i++) {
230 struct sk_buff *skb;
231
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 struct ieee80211_tx_info *info;
234
235 info = IEEE80211_SKB_CB(skb);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 atomic_dec(&ar->tx_ampdu_upload);
238
239 carl9170_tx_status(ar, skb, false);
240 }
241 }
242 }
243
244 /* Wait for all other outstanding frames to timeout. */
245 if (atomic_read(&ar->tx_total_queued))
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247 }
248
static void carl9170_flush_ba(struct ar9170 *ar)
250 {
251 struct sk_buff_head free;
252 struct carl9170_sta_tid *tid_info;
253 struct sk_buff *skb;
254
255 __skb_queue_head_init(&free);
256
257 rcu_read_lock();
258 spin_lock_bh(&ar->tx_ampdu_list_lock);
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 tid_info->state = CARL9170_TID_STATE_SUSPEND;
262
263 spin_lock(&tid_info->lock);
264 while ((skb = __skb_dequeue(&tid_info->queue)))
265 __skb_queue_tail(&free, skb);
266 spin_unlock(&tid_info->lock);
267 }
268 }
269 spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 rcu_read_unlock();
271
272 while ((skb = __skb_dequeue(&free)))
273 carl9170_tx_status(ar, skb, false);
274 }
275
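/*
 * Reset all TX bookkeeping to a clean slate: reap A-MPDU sessions,
 * drop queued and pending frames, clear the status queues and the
 * firmware memory bitmap, and reinitialize the per-queue counters.
 */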
static void carl9170_zap_queues(struct ar9170 *ar)
277 {
278 struct carl9170_vif_info *cvif;
279 unsigned int i;
280
281 carl9170_ampdu_gc(ar);
282
283 carl9170_flush_ba(ar);
284 carl9170_flush(ar, true);
285
286 for (i = 0; i < ar->hw->queues; i++) {
287 spin_lock_bh(&ar->tx_status[i].lock);
288 while (!skb_queue_empty(&ar->tx_status[i])) {
289 struct sk_buff *skb;
290
291 skb = skb_peek(&ar->tx_status[i]);
292 carl9170_tx_get_skb(skb);
293 spin_unlock_bh(&ar->tx_status[i].lock);
294 carl9170_tx_drop(ar, skb);
295 spin_lock_bh(&ar->tx_status[i].lock);
296 carl9170_tx_put_skb(skb);
297 }
298 spin_unlock_bh(&ar->tx_status[i].lock);
299 }
300
301 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
304
305 /* reinitialize queues statistics */
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 for (i = 0; i < ar->hw->queues; i++)
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309
310 bitmap_zero(ar->mem_bitmap, ar->fw.mem_blocks);
311
312 rcu_read_lock();
313 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
314 spin_lock_bh(&ar->beacon_lock);
315 dev_kfree_skb_any(cvif->beacon);
316 cvif->beacon = NULL;
317 spin_unlock_bh(&ar->beacon_lock);
318 }
319 rcu_read_unlock();
320
321 atomic_set(&ar->tx_ampdu_upload, 0);
322 atomic_set(&ar->tx_ampdu_scheduler, 0);
323 atomic_set(&ar->tx_total_pending, 0);
324 atomic_set(&ar->tx_total_queued, 0);
325 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
326 }
327
328 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
329 do { \
330 queue.aifs = ai_fs; \
331 queue.cw_min = cwmin; \
332 queue.cw_max = cwmax; \
333 queue.txop = _txop; \
334 } while (0)
335
static int carl9170_op_start(struct ieee80211_hw *hw)
337 {
338 struct ar9170 *ar = hw->priv;
339 int err, i;
340
341 mutex_lock(&ar->mutex);
342
343 carl9170_zap_queues(ar);
344
345 /* reset QoS defaults */
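/* (AIFS, CWmin, CWmax, TXOP) per AC; TXOP is in the usual 32 us units. */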
346 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
351
352 ar->current_factor = ar->current_density = -1;
353 /* "The first key is unique." */
354 ar->usedkeys = 1;
355 ar->filter_state = 0;
356 ar->ps.last_action = jiffies;
357 ar->ps.last_slept = jiffies;
358 ar->erp_mode = CARL9170_ERP_AUTO;
359
360 /* Set "disable hw crypto offload" whenever the module parameter
361 * nohwcrypt is true or if the firmware does not support it.
362 */
363 ar->disable_offload = modparam_nohwcrypt |
364 ar->fw.disable_offload_fw;
365 ar->rx_software_decryption = ar->disable_offload;
366
367 for (i = 0; i < ar->hw->queues; i++) {
368 ar->queue_stop_timeout[i] = jiffies;
369 ar->max_queue_stop_timeout[i] = 0;
370 }
371
372 atomic_set(&ar->mem_allocs, 0);
373
374 err = carl9170_usb_open(ar);
375 if (err)
376 goto out;
377
378 err = carl9170_init_mac(ar);
379 if (err)
380 goto out;
381
382 err = carl9170_set_qos(ar);
383 if (err)
384 goto out;
385
386 if (ar->fw.rx_filter) {
387 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
388 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
389 if (err)
390 goto out;
391 }
392
393 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
394 AR9170_DMA_TRIGGER_RXQ);
395 if (err)
396 goto out;
397
398 /* Clear key-cache */
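/* Both key halves are wiped (the second upload slot holds e.g. the
 * TKIP MIC key) and the per-user entries are disabled.
 */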
399 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
400 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
401 0, NULL, 0);
402 if (err)
403 goto out;
404
405 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
406 1, NULL, 0);
407 if (err)
408 goto out;
409
410 if (i < AR9170_CAM_MAX_USER) {
411 err = carl9170_disable_key(ar, i);
412 if (err)
413 goto out;
414 }
415 }
416
417 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
418
419 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
420 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
421
422 ieee80211_wake_queues(ar->hw);
423 err = 0;
424
425 out:
426 mutex_unlock(&ar->mutex);
427 return err;
428 }
429
static void carl9170_cancel_worker(struct ar9170 *ar)
431 {
432 cancel_delayed_work_sync(&ar->stat_work);
433 cancel_delayed_work_sync(&ar->tx_janitor);
434 #ifdef CONFIG_CARL9170_LEDS
435 cancel_delayed_work_sync(&ar->led_work);
436 #endif /* CONFIG_CARL9170_LEDS */
437 cancel_work_sync(&ar->ps_work);
438 cancel_work_sync(&ar->ping_work);
439 cancel_work_sync(&ar->ampdu_work);
440 }
441
static void carl9170_op_stop(struct ieee80211_hw *hw)
443 {
444 struct ar9170 *ar = hw->priv;
445
446 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
447
448 ieee80211_stop_queues(ar->hw);
449
450 mutex_lock(&ar->mutex);
451 if (IS_ACCEPTING_CMD(ar)) {
452 RCU_INIT_POINTER(ar->beacon_iter, NULL);
453
454 carl9170_led_set_state(ar, 0);
455
456 /* stop DMA */
457 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
458 carl9170_usb_stop(ar);
459 }
460
461 carl9170_zap_queues(ar);
462 mutex_unlock(&ar->mutex);
463
464 carl9170_cancel_worker(ar);
465 }
466
static void carl9170_restart_work(struct work_struct *work)
468 {
469 struct ar9170 *ar = container_of(work, struct ar9170,
470 restart_work);
471 int err = -EIO;
472
473 ar->usedkeys = 0;
474 ar->filter_state = 0;
475 carl9170_cancel_worker(ar);
476
477 mutex_lock(&ar->mutex);
478 if (!ar->force_usb_reset) {
479 err = carl9170_usb_restart(ar);
480 if (net_ratelimit()) {
481 if (err)
482 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
483 else
484 dev_info(&ar->udev->dev, "device restarted successfully.\n");
485 }
486 }
487 carl9170_zap_queues(ar);
488 mutex_unlock(&ar->mutex);
489
490 if (!err && !ar->force_usb_reset) {
491 ar->restart_counter++;
492 atomic_set(&ar->pending_restarts, 0);
493
494 ieee80211_restart_hw(ar->hw);
495 } else {
496 /*
497 * The reset was unsuccessful and the device seems to
498 * be dead. But there's still one option: a low-level
499 * usb subsystem reset...
500 */
501
502 carl9170_usb_reset(ar);
503 }
504 }
505
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
507 {
508 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
509
510 /*
511 * Sometimes, an error can trigger several different reset events.
512 * By ignoring these *surplus* reset events, the device won't be
513 * killed again, right after it has recovered.
514 */
515 if (atomic_inc_return(&ar->pending_restarts) > 1) {
516 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
517 return;
518 }
519
520 ieee80211_stop_queues(ar->hw);
521
522 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
523
524 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
525 !WARN_ON(r >= __CARL9170_RR_LAST))
526 ar->last_reason = r;
527
528 if (!ar->registered)
529 return;
530
531 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
532 ar->force_usb_reset = true;
533
534 ieee80211_queue_work(ar->hw, &ar->restart_work);
535
536 /*
537 * At this point, the device instance might have vanished/disabled.
 * So, don't put any code which accesses the ar9170 struct
539 * without proper protection.
540 */
541 }
542
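/*
 * Firmware watchdog: issue an echo command and trigger a full device
 * restart if the firmware fails to answer it.
 */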
static void carl9170_ping_work(struct work_struct *work)
544 {
545 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
546 int err;
547
548 if (!IS_STARTED(ar))
549 return;
550
551 mutex_lock(&ar->mutex);
552 err = carl9170_echo_test(ar, 0xdeadbeef);
553 if (err)
554 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
555 mutex_unlock(&ar->mutex);
556 }
557
static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
560 {
561 struct ath_common *common = &ar->common;
562 int err;
563
564 if (!vif) {
565 WARN_ON_ONCE(IS_STARTED(ar));
566 return 0;
567 }
568
569 memcpy(common->macaddr, vif->addr, ETH_ALEN);
570
/* We have to fall back to software crypto, whenever
 * the user chooses to participate in an IBSS. HW
 * offload for IBSS RSN is not supported by this driver.
 *
 * NOTE: If the previous main interface has already
 * disabled hw crypto offload, we have to keep this
 * previous disable_offload setting as it was.
 * Although ideally, we should notify mac80211 and tell
 * it to forget about any HW crypto offload for now.
 */
581 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
582 (vif->type != NL80211_IFTYPE_AP));
583
/* The driver used to have P2P GO+CLIENT support,
 * but since this was dropped and we don't know if
 * there are any gremlins lurking in the shadows,
 * we keep HW offload disabled for P2P to be safe.
 */
589 ar->disable_offload |= vif->p2p;
590
591 ar->rx_software_decryption = ar->disable_offload;
592
593 err = carl9170_set_operating_mode(ar);
594 return err;
595 }
596
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
599 {
600 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
601 struct ieee80211_vif *main_vif, *old_main = NULL;
602 struct ar9170 *ar = hw->priv;
603 int vif_id = -1, err = 0;
604
605 mutex_lock(&ar->mutex);
606 rcu_read_lock();
607 if (vif_priv->active) {
608 /*
609 * Skip the interface structure initialization,
610 * if the vif survived the _restart call.
611 */
612 vif_id = vif_priv->id;
613 vif_priv->enable_beacon = false;
614
615 spin_lock_bh(&ar->beacon_lock);
616 dev_kfree_skb_any(vif_priv->beacon);
617 vif_priv->beacon = NULL;
618 spin_unlock_bh(&ar->beacon_lock);
619
620 goto init;
621 }
622
/* The AR9170 HW's MAC doesn't provide full support for multiple,
 * independent interfaces [of different operation modes], so we
 * have to select ONE main interface [main mode of HW], but we
 * can have multiple slaves [AKA: entries in the ACK-table].
 *
 * The first (from HEAD/TOP) interface in the ar->vif_list is
 * always the main intf. All following intfs in this list
 * are considered to be slave intfs.
 */
632 main_vif = carl9170_get_main_vif(ar);
633
634 if (main_vif) {
635 switch (main_vif->type) {
636 case NL80211_IFTYPE_STATION:
637 if (vif->type == NL80211_IFTYPE_STATION)
638 break;
639
640 err = -EBUSY;
641 rcu_read_unlock();
642
643 goto unlock;
644
645 case NL80211_IFTYPE_MESH_POINT:
646 case NL80211_IFTYPE_AP:
647 if ((vif->type == NL80211_IFTYPE_STATION) ||
648 (vif->type == NL80211_IFTYPE_AP) ||
649 (vif->type == NL80211_IFTYPE_MESH_POINT))
650 break;
651
652 err = -EBUSY;
653 rcu_read_unlock();
654 goto unlock;
655
656 default:
657 rcu_read_unlock();
658 goto unlock;
659 }
660 }
661
662 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
663
664 if (vif_id < 0) {
665 rcu_read_unlock();
666
667 err = -ENOSPC;
668 goto unlock;
669 }
670
671 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
672
673 vif_priv->active = true;
674 vif_priv->id = vif_id;
675 vif_priv->enable_beacon = false;
676 ar->vifs++;
677 if (old_main) {
678 /* We end up in here, if the main interface is being replaced.
679 * Put the new main interface at the HEAD of the list and the
 * previous interface will automatically become second in line.
681 */
682 list_add_rcu(&vif_priv->list, &ar->vif_list);
683 } else {
/* Add the new interface. If the list is empty, it will become
 * the main interface; otherwise it will be a slave.
686 */
687 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
688 }
689 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
690
691 init:
692 main_vif = carl9170_get_main_vif(ar);
693
694 if (main_vif == vif) {
695 rcu_assign_pointer(ar->beacon_iter, vif_priv);
696 rcu_read_unlock();
697
698 if (old_main) {
699 struct carl9170_vif_info *old_main_priv =
700 (void *) old_main->drv_priv;
701 /* downgrade old main intf to slave intf.
702 * NOTE: We are no longer under rcu_read_lock.
703 * But we are still holding ar->mutex, so the
704 * vif data [id, addr] is safe.
705 */
706 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
707 old_main->addr);
708 if (err)
709 goto unlock;
710 }
711
712 err = carl9170_init_interface(ar, vif);
713 if (err)
714 goto unlock;
715 } else {
716 rcu_read_unlock();
717 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
718
719 if (err)
720 goto unlock;
721 }
722
723 if (ar->fw.tx_seq_table) {
724 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
725 0);
726 if (err)
727 goto unlock;
728 }
729
730 unlock:
731 if (err && (vif_id >= 0)) {
732 vif_priv->active = false;
733 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
734 ar->vifs--;
735 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
736 list_del_rcu(&vif_priv->list);
737 mutex_unlock(&ar->mutex);
738 synchronize_rcu();
739 } else {
740 if (ar->vifs > 1)
741 ar->ps.off_override |= PS_OFF_VIF;
742
743 mutex_unlock(&ar->mutex);
744 }
745
746 return err;
747 }
748
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
751 {
752 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
753 struct ieee80211_vif *main_vif;
754 struct ar9170 *ar = hw->priv;
755 unsigned int id;
756
757 mutex_lock(&ar->mutex);
758
759 if (WARN_ON_ONCE(!vif_priv->active))
760 goto unlock;
761
762 ar->vifs--;
763
764 rcu_read_lock();
765 main_vif = carl9170_get_main_vif(ar);
766
767 id = vif_priv->id;
768
769 vif_priv->active = false;
770 WARN_ON(vif_priv->enable_beacon);
771 vif_priv->enable_beacon = false;
772 list_del_rcu(&vif_priv->list);
773 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
774
775 if (vif == main_vif) {
776 rcu_read_unlock();
777
778 if (ar->vifs) {
779 WARN_ON(carl9170_init_interface(ar,
780 carl9170_get_main_vif(ar)));
781 } else {
782 carl9170_set_operating_mode(ar);
783 }
784 } else {
785 rcu_read_unlock();
786
787 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
788 }
789
790 carl9170_update_beacon(ar, false);
791 carl9170_flush_cab(ar, id);
792
793 spin_lock_bh(&ar->beacon_lock);
794 dev_kfree_skb_any(vif_priv->beacon);
795 vif_priv->beacon = NULL;
796 spin_unlock_bh(&ar->beacon_lock);
797
798 bitmap_release_region(&ar->vif_bitmap, id, 0);
799
800 carl9170_set_beacon_timers(ar);
801
802 if (ar->vifs == 1)
803 ar->ps.off_override &= ~PS_OFF_VIF;
804
805 unlock:
806 mutex_unlock(&ar->mutex);
807
808 synchronize_rcu();
809 }
810
void carl9170_ps_check(struct ar9170 *ar)
812 {
813 ieee80211_queue_work(ar->hw, &ar->ps_work);
814 }
815
816 /* caller must hold ar->mutex */
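/*
 * Powersave is only entered when nothing in ps.off_override vetoes it
 * (e.g. PS_OFF_VIF while more than one interface is up) and mac80211
 * has requested PS via IEEE80211_CONF_PS.
 */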
static int carl9170_ps_update(struct ar9170 *ar)
818 {
819 bool ps = false;
820 int err = 0;
821
822 if (!ar->ps.off_override)
823 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
824
825 if (ps != ar->ps.state) {
826 err = carl9170_powersave(ar, ps);
827 if (err)
828 return err;
829
830 if (ar->ps.state && !ps) {
831 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
832 ar->ps.last_action);
833 }
834
835 if (ps)
836 ar->ps.last_slept = jiffies;
837
838 ar->ps.last_action = jiffies;
839 ar->ps.state = ps;
840 }
841
842 return 0;
843 }
844
static void carl9170_ps_work(struct work_struct *work)
846 {
847 struct ar9170 *ar = container_of(work, struct ar9170,
848 ps_work);
849 mutex_lock(&ar->mutex);
850 if (IS_STARTED(ar))
851 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
852 mutex_unlock(&ar->mutex);
853 }
854
static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
856 {
857 int err;
858
859 if (noise) {
860 err = carl9170_get_noisefloor(ar);
861 if (err)
862 return err;
863 }
864
865 if (ar->fw.hw_counters) {
866 err = carl9170_collect_tally(ar);
867 if (err)
868 return err;
869 }
870
871 if (flush)
872 memset(&ar->tally, 0, sizeof(ar->tally));
873
874 return 0;
875 }
876
static void carl9170_stat_work(struct work_struct *work)
878 {
879 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
880 int err;
881
882 mutex_lock(&ar->mutex);
883 err = carl9170_update_survey(ar, false, true);
884 mutex_unlock(&ar->mutex);
885
886 if (err)
887 return;
888
889 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
890 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
891 }
892
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
894 {
895 struct ar9170 *ar = hw->priv;
896 int err = 0;
897
898 mutex_lock(&ar->mutex);
899 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
900 /* TODO */
901 err = 0;
902 }
903
904 if (changed & IEEE80211_CONF_CHANGE_PS) {
905 err = carl9170_ps_update(ar);
906 if (err)
907 goto out;
908 }
909
910 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
911 /* TODO */
912 err = 0;
913 }
914
915 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
916 enum nl80211_channel_type channel_type =
917 cfg80211_get_chandef_type(&hw->conf.chandef);
918
919 /* adjust slot time for 5 GHz */
920 err = carl9170_set_slot_time(ar);
921 if (err)
922 goto out;
923
924 err = carl9170_update_survey(ar, true, false);
925 if (err)
926 goto out;
927
928 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
929 channel_type);
930 if (err)
931 goto out;
932
933 err = carl9170_update_survey(ar, false, true);
934 if (err)
935 goto out;
936
937 err = carl9170_set_dyn_sifs_ack(ar);
938 if (err)
939 goto out;
940
941 err = carl9170_set_rts_cts_rate(ar);
942 if (err)
943 goto out;
944 }
945
946 if (changed & IEEE80211_CONF_CHANGE_POWER) {
947 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
948 if (err)
949 goto out;
950 }
951
952 out:
953 mutex_unlock(&ar->mutex);
954 return err;
955 }
956
static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
959 {
960 struct netdev_hw_addr *ha;
961 u64 mchash;
962
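/*
 * The hardware filters multicast frames with a 64-bit hash: each
 * group address sets the bit selected by the top six bits of its
 * last byte (so 0xff >> 2 seeds the broadcast bit).
 */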
963 /* always get broadcast frames */
964 mchash = 1ULL << (0xff >> 2);
965
966 netdev_hw_addr_list_for_each(ha, mc_list)
967 mchash |= 1ULL << (ha->addr[5] >> 2);
968
969 return mchash;
970 }
971
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
976 {
977 struct ar9170 *ar = hw->priv;
978
979 /* mask supported flags */
980 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
981
982 if (!IS_ACCEPTING_CMD(ar))
983 return;
984
985 mutex_lock(&ar->mutex);
986
987 ar->filter_state = *new_flags;
988 /*
989 * We can support more by setting the sniffer bit and
990 * then checking the error flags, later.
991 */
992
993 if (*new_flags & FIF_ALLMULTI)
994 multicast = ~0ULL;
995
996 if (multicast != ar->cur_mc_hash)
997 WARN_ON(carl9170_update_multicast(ar, multicast));
998
999 if (changed_flags & FIF_OTHER_BSS) {
1000 ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1001
1002 WARN_ON(carl9170_set_operating_mode(ar));
1003 }
1004
1005 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1006 u32 rx_filter = 0;
1007
1008 if (!ar->fw.ba_filter)
1009 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1010
1011 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1012 rx_filter |= CARL9170_RX_FILTER_BAD;
1013
1014 if (!(*new_flags & FIF_CONTROL))
1015 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1016
1017 if (!(*new_flags & FIF_PSPOLL))
1018 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1019
1020 if (!(*new_flags & FIF_OTHER_BSS)) {
1021 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1022 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1023 }
1024
1025 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1026 }
1027
1028 mutex_unlock(&ar->mutex);
1029 }
1030
1031
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
1036 {
1037 struct ar9170 *ar = hw->priv;
1038 struct ath_common *common = &ar->common;
1039 int err = 0;
1040 struct carl9170_vif_info *vif_priv;
1041 struct ieee80211_vif *main_vif;
1042
1043 mutex_lock(&ar->mutex);
1044 vif_priv = (void *) vif->drv_priv;
1045 main_vif = carl9170_get_main_vif(ar);
1046 if (WARN_ON(!main_vif))
1047 goto out;
1048
1049 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1050 struct carl9170_vif_info *iter;
1051 int i = 0;
1052
1053 vif_priv->enable_beacon = bss_conf->enable_beacon;
1054 rcu_read_lock();
1055 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1056 if (iter->active && iter->enable_beacon)
1057 i++;
1058
1059 }
1060 rcu_read_unlock();
1061
1062 ar->beacon_enabled = i;
1063 }
1064
1065 if (changed & BSS_CHANGED_BEACON) {
1066 err = carl9170_update_beacon(ar, false);
1067 if (err)
1068 goto out;
1069 }
1070
1071 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1072 BSS_CHANGED_BEACON_INT)) {
1073
1074 if (main_vif != vif) {
1075 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1076 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1077 }
1078
/*
 * Buffered multicast/broadcast frames are only released after DTIM
 * beacons, so an overly long beacon_int * dtim_period could make the
 * CAB traffic look stuck. A hard limit on this interval therefore
 * prevents false alarms from the queue watchdog.
 */
1083 if (vif->type != NL80211_IFTYPE_STATION &&
1084 (bss_conf->beacon_int * bss_conf->dtim_period >=
1085 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1086 err = -EINVAL;
1087 goto out;
1088 }
1089
1090 err = carl9170_set_beacon_timers(ar);
1091 if (err)
1092 goto out;
1093 }
1094
1095 if (changed & BSS_CHANGED_HT) {
1096 /* TODO */
1097 err = 0;
1098 if (err)
1099 goto out;
1100 }
1101
1102 if (main_vif != vif)
1103 goto out;
1104
1105 /*
1106 * The following settings can only be changed by the
1107 * master interface.
1108 */
1109
1110 if (changed & BSS_CHANGED_BSSID) {
1111 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1112 err = carl9170_set_operating_mode(ar);
1113 if (err)
1114 goto out;
1115 }
1116
1117 if (changed & BSS_CHANGED_ASSOC) {
1118 ar->common.curaid = bss_conf->aid;
1119 err = carl9170_set_beacon_timers(ar);
1120 if (err)
1121 goto out;
1122 }
1123
1124 if (changed & BSS_CHANGED_ERP_SLOT) {
1125 err = carl9170_set_slot_time(ar);
1126 if (err)
1127 goto out;
1128 }
1129
1130 if (changed & BSS_CHANGED_BASIC_RATES) {
1131 err = carl9170_set_mac_rates(ar);
1132 if (err)
1133 goto out;
1134 }
1135
1136 out:
1137 WARN_ON_ONCE(err && IS_STARTED(ar));
1138 mutex_unlock(&ar->mutex);
1139 }
1140
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
1143 {
1144 struct ar9170 *ar = hw->priv;
1145 struct carl9170_tsf_rsp tsf;
1146 int err;
1147
1148 mutex_lock(&ar->mutex);
1149 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1150 0, NULL, sizeof(tsf), &tsf);
1151 mutex_unlock(&ar->mutex);
1152 if (WARN_ON(err))
1153 return 0;
1154
1155 return le64_to_cpu(tsf.tsf_64);
1156 }
1157
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
1162 {
1163 struct ar9170 *ar = hw->priv;
1164 int err = 0, i;
1165 u8 ktype;
1166
1167 if (ar->disable_offload || !vif)
1168 return -EOPNOTSUPP;
1169
1170 /* Fall back to software encryption whenever the driver is connected
1171 * to more than one network.
1172 *
1173 * This is very unfortunate, because some machines cannot handle
 * the high throughput speed in 802.11n networks.
1175 */
1176
1177 if (!is_main_vif(ar, vif)) {
1178 mutex_lock(&ar->mutex);
1179 goto err_softw;
1180 }
1181
1182 /*
 * The hardware supports a *catch-all* key for offloading
 * group-key en-/decryption. However, how the hardware
 * decides which keyId maps to which key remains a mystery...
1186 */
1187 if ((vif->type != NL80211_IFTYPE_STATION &&
1188 vif->type != NL80211_IFTYPE_ADHOC) &&
1189 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1190 return -EOPNOTSUPP;
1191
1192 switch (key->cipher) {
1193 case WLAN_CIPHER_SUITE_WEP40:
1194 ktype = AR9170_ENC_ALG_WEP64;
1195 break;
1196 case WLAN_CIPHER_SUITE_WEP104:
1197 ktype = AR9170_ENC_ALG_WEP128;
1198 break;
1199 case WLAN_CIPHER_SUITE_TKIP:
1200 ktype = AR9170_ENC_ALG_TKIP;
1201 break;
1202 case WLAN_CIPHER_SUITE_CCMP:
1203 ktype = AR9170_ENC_ALG_AESCCMP;
1204 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1205 break;
1206 default:
1207 return -EOPNOTSUPP;
1208 }
1209
1210 mutex_lock(&ar->mutex);
1211 if (cmd == SET_KEY) {
1212 if (!IS_STARTED(ar)) {
1213 err = -EOPNOTSUPP;
1214 goto out;
1215 }
1216
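/*
 * Group keys live in the fixed slots 64 + keyidx; pairwise keys
 * grab the first free slot of the 64 tracked in ar->usedkeys.
 */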
1217 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1218 sta = NULL;
1219
1220 i = 64 + key->keyidx;
1221 } else {
1222 for (i = 0; i < 64; i++)
1223 if (!(ar->usedkeys & BIT(i)))
1224 break;
1225 if (i == 64)
1226 goto err_softw;
1227 }
1228
1229 key->hw_key_idx = i;
1230
1231 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1232 ktype, 0, key->key,
1233 min_t(u8, 16, key->keylen));
1234 if (err)
1235 goto out;
1236
1237 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1238 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1239 NULL, ktype, 1,
1240 key->key + 16, 16);
1241 if (err)
1242 goto out;
1243
1244 /*
 * The hardware is not capable of generating the MMIC
 * for fragmented frames!
1247 */
1248 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1249 }
1250
1251 if (i < 64)
1252 ar->usedkeys |= BIT(i);
1253
1254 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1255 } else {
1256 if (!IS_STARTED(ar)) {
1257 /* The device is gone... together with the key ;-) */
1258 err = 0;
1259 goto out;
1260 }
1261
1262 if (key->hw_key_idx < 64) {
1263 ar->usedkeys &= ~BIT(key->hw_key_idx);
1264 } else {
1265 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1266 AR9170_ENC_ALG_NONE, 0,
1267 NULL, 0);
1268 if (err)
1269 goto out;
1270
1271 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1272 err = carl9170_upload_key(ar, key->hw_key_idx,
1273 NULL,
1274 AR9170_ENC_ALG_NONE,
1275 1, NULL, 0);
1276 if (err)
1277 goto out;
1278 }
1279
1280 }
1281
1282 err = carl9170_disable_key(ar, key->hw_key_idx);
1283 if (err)
1284 goto out;
1285 }
1286
1287 out:
1288 mutex_unlock(&ar->mutex);
1289 return err;
1290
1291 err_softw:
1292 if (!ar->rx_software_decryption) {
1293 ar->rx_software_decryption = true;
1294 carl9170_set_operating_mode(ar);
1295 }
1296 mutex_unlock(&ar->mutex);
1297 return -ENOSPC;
1298 }
1299
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
1303 {
1304 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1305 unsigned int i;
1306
1307 atomic_set(&sta_info->pending_frames, 0);
1308
1309 if (sta->deflink.ht_cap.ht_supported) {
1310 if (sta->deflink.ht_cap.ampdu_density > 6) {
1311 /*
 * HW does not support 16us AMPDU density.
1313 * No HT-Xmit for station.
1314 */
1315
1316 return 0;
1317 }
1318
1319 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1320 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1321
1322 sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor);
1323 sta_info->ht_sta = true;
1324 }
1325
1326 return 0;
1327 }
1328
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
1332 {
1333 struct ar9170 *ar = hw->priv;
1334 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1335 unsigned int i;
1336 bool cleanup = false;
1337
1338 if (sta->deflink.ht_cap.ht_supported) {
1339
1340 sta_info->ht_sta = false;
1341
1342 rcu_read_lock();
1343 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1344 struct carl9170_sta_tid *tid_info;
1345
1346 tid_info = rcu_dereference(sta_info->agg[i]);
1347 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1348
1349 if (!tid_info)
1350 continue;
1351
1352 spin_lock_bh(&ar->tx_ampdu_list_lock);
1353 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1354 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1355 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1356 cleanup = true;
1357 }
1358 rcu_read_unlock();
1359
1360 if (cleanup)
1361 carl9170_ampdu_gc(ar);
1362 }
1363
1364 return 0;
1365 }
1366
static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
1370 {
1371 struct ar9170 *ar = hw->priv;
1372 int ret;
1373
1374 mutex_lock(&ar->mutex);
1375 memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
1376 ret = carl9170_set_qos(ar);
1377 mutex_unlock(&ar->mutex);
1378 return ret;
1379 }
1380
static void carl9170_ampdu_work(struct work_struct *work)
1382 {
1383 struct ar9170 *ar = container_of(work, struct ar9170,
1384 ampdu_work);
1385
1386 if (!IS_STARTED(ar))
1387 return;
1388
1389 mutex_lock(&ar->mutex);
1390 carl9170_ampdu_gc(ar);
1391 mutex_unlock(&ar->mutex);
1392 }
1393
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
1397 {
1398 struct ieee80211_sta *sta = params->sta;
1399 enum ieee80211_ampdu_mlme_action action = params->action;
1400 u16 tid = params->tid;
u16 *ssn = &params->ssn;
1402 struct ar9170 *ar = hw->priv;
1403 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1404 struct carl9170_sta_tid *tid_info;
1405
1406 if (modparam_noht)
1407 return -EOPNOTSUPP;
1408
1409 switch (action) {
1410 case IEEE80211_AMPDU_TX_START:
1411 if (!sta_info->ht_sta)
1412 return -EOPNOTSUPP;
1413
1414 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1415 GFP_KERNEL);
1416 if (!tid_info)
1417 return -ENOMEM;
1418
1419 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1420 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1421 tid_info->tid = tid;
1422 tid_info->max = sta_info->ampdu_max_len;
1423 tid_info->sta = sta;
1424 tid_info->vif = vif;
1425
1426 INIT_LIST_HEAD(&tid_info->list);
1427 INIT_LIST_HEAD(&tid_info->tmp_list);
1428 skb_queue_head_init(&tid_info->queue);
1429 spin_lock_init(&tid_info->lock);
1430
1431 spin_lock_bh(&ar->tx_ampdu_list_lock);
1432 ar->tx_ampdu_list_len++;
1433 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1434 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1435 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1436
1437 return IEEE80211_AMPDU_TX_START_IMMEDIATE;
1438
1439 case IEEE80211_AMPDU_TX_STOP_CONT:
1440 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1441 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1442 rcu_read_lock();
1443 tid_info = rcu_dereference(sta_info->agg[tid]);
1444 if (tid_info) {
1445 spin_lock_bh(&ar->tx_ampdu_list_lock);
1446 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1447 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1448 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1449 }
1450
1451 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1452 rcu_read_unlock();
1453
1454 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1455 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1456 break;
1457
1458 case IEEE80211_AMPDU_TX_OPERATIONAL:
1459 rcu_read_lock();
1460 tid_info = rcu_dereference(sta_info->agg[tid]);
1461
1462 sta_info->stats[tid].clear = true;
1463 sta_info->stats[tid].req = false;
1464
1465 if (tid_info) {
1466 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1467 tid_info->state = CARL9170_TID_STATE_IDLE;
1468 }
1469 rcu_read_unlock();
1470
1471 if (WARN_ON_ONCE(!tid_info))
1472 return -EFAULT;
1473
1474 break;
1475
1476 case IEEE80211_AMPDU_RX_START:
1477 case IEEE80211_AMPDU_RX_STOP:
1478 /* Handled by hardware */
1479 break;
1480
1481 default:
1482 return -EOPNOTSUPP;
1483 }
1484
1485 return 0;
1486 }
1487
1488 #ifdef CONFIG_CARL9170_WPC
static int carl9170_register_wps_button(struct ar9170 *ar)
1490 {
1491 struct input_dev *input;
1492 int err;
1493
1494 if (!(ar->features & CARL9170_WPS_BUTTON))
1495 return 0;
1496
1497 input = devm_input_allocate_device(&ar->udev->dev);
1498 if (!input)
1499 return -ENOMEM;
1500
1501 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1502 wiphy_name(ar->hw->wiphy));
1503
1504 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1505 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1506
1507 input->name = ar->wps.name;
1508 input->phys = ar->wps.phys;
1509 input->id.bustype = BUS_USB;
1510 input->dev.parent = &ar->hw->wiphy->dev;
1511
1512 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1513
1514 err = input_register_device(input);
1515 if (err)
1516 return err;
1517
1518 ar->wps.pbc = input;
1519 return 0;
1520 }
1521 #endif /* CONFIG_CARL9170_WPC */
1522
1523 #ifdef CONFIG_CARL9170_HWRNG
static int carl9170_rng_get(struct ar9170 *ar)
1525 {
1526
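/*
 * Refill the RNG cache: every payload word of the RREG command points
 * at AR9170_RAND_REG_NUM, so a single round trip returns RW random
 * samples (only the low 16 bits of each readback are kept).
 */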
1527 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1528 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1529
1530 static const __le32 rng_load[RW] = {
1531 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1532
1533 u32 buf[RW];
1534
1535 unsigned int i, off = 0, transfer, count;
1536 int err;
1537
1538 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1539
1540 if (!IS_ACCEPTING_CMD(ar))
1541 return -EAGAIN;
1542
1543 count = ARRAY_SIZE(ar->rng.cache);
1544 while (count) {
1545 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1546 RB, (u8 *) rng_load,
1547 RB, (u8 *) buf);
1548 if (err)
1549 return err;
1550
1551 transfer = min_t(unsigned int, count, RW);
1552 for (i = 0; i < transfer; i++)
1553 ar->rng.cache[off + i] = buf[i];
1554
1555 off += transfer;
1556 count -= transfer;
1557 }
1558
1559 ar->rng.cache_idx = 0;
1560
1561 #undef RW
1562 #undef RB
1563 return 0;
1564 }
1565
static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1567 {
1568 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1569 int ret = -EIO;
1570
1571 mutex_lock(&ar->mutex);
1572 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1573 ret = carl9170_rng_get(ar);
1574 if (ret) {
1575 mutex_unlock(&ar->mutex);
1576 return ret;
1577 }
1578 }
1579
1580 *data = ar->rng.cache[ar->rng.cache_idx++];
1581 mutex_unlock(&ar->mutex);
1582
1583 return sizeof(u16);
1584 }
1585
static int carl9170_register_hwrng(struct ar9170 *ar)
1587 {
1588 int err;
1589
1590 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1591 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1592 ar->rng.rng.name = ar->rng.name;
1593 ar->rng.rng.data_read = carl9170_rng_read;
1594 ar->rng.rng.priv = (unsigned long)ar;
1595
1596 err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
1597 if (err) {
1598 dev_err(&ar->udev->dev, "Failed to register the random "
1599 "number generator (%d)\n", err);
1600 return err;
1601 }
1602
1603 return carl9170_rng_get(ar);
1604 }
1605 #endif /* CONFIG_CARL9170_HWRNG */
1606
static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
1609 {
1610 struct ar9170 *ar = hw->priv;
1611 struct ieee80211_channel *chan;
1612 struct ieee80211_supported_band *band;
1613 int err, b, i;
1614
1615 chan = ar->channel;
1616 if (!chan)
1617 return -ENODEV;
1618
1619 if (idx == chan->hw_value) {
1620 mutex_lock(&ar->mutex);
1621 err = carl9170_update_survey(ar, false, true);
1622 mutex_unlock(&ar->mutex);
1623 if (err)
1624 return err;
1625 }
1626
1627 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1628 band = ar->hw->wiphy->bands[b];
1629
1630 if (!band)
1631 continue;
1632
1633 for (i = 0; i < band->n_channels; i++) {
1634 if (band->channels[i].hw_value == idx) {
1635 chan = &band->channels[i];
1636 goto found;
1637 }
1638 }
1639 }
1640 return -ENOENT;
1641
1642 found:
1643 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1644
1645 survey->channel = chan;
1646 survey->filled = SURVEY_INFO_NOISE_DBM;
1647
1648 if (ar->channel == chan)
1649 survey->filled |= SURVEY_INFO_IN_USE;
1650
1651 if (ar->fw.hw_counters) {
1652 survey->filled |= SURVEY_INFO_TIME |
1653 SURVEY_INFO_TIME_BUSY |
1654 SURVEY_INFO_TIME_TX;
1655 }
1656
1657 return 0;
1658 }
1659
static void carl9170_op_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      u32 queues, bool drop)
1663 {
1664 struct ar9170 *ar = hw->priv;
1665 unsigned int vid;
1666
1667 mutex_lock(&ar->mutex);
1668 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1669 carl9170_flush_cab(ar, vid);
1670
1671 carl9170_flush(ar, drop);
1672 mutex_unlock(&ar->mutex);
1673 }
1674
static int carl9170_op_get_stats(struct ieee80211_hw *hw,
				 struct ieee80211_low_level_stats *stats)
1677 {
1678 struct ar9170 *ar = hw->priv;
1679
1680 memset(stats, 0, sizeof(*stats));
1681 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1682 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1683 return 0;
1684 }
1685
static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
1690 {
1691 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1692
1693 switch (cmd) {
1694 case STA_NOTIFY_SLEEP:
1695 sta_info->sleeping = true;
1696 if (atomic_read(&sta_info->pending_frames))
1697 ieee80211_sta_block_awake(hw, sta, true);
1698 break;
1699
1700 case STA_NOTIFY_AWAKE:
1701 sta_info->sleeping = false;
1702 break;
1703 }
1704 }
1705
static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1707 {
1708 struct ar9170 *ar = hw->priv;
1709
1710 return !!atomic_read(&ar->tx_total_queued);
1711 }
1712
1713 static const struct ieee80211_ops carl9170_ops = {
1714 .start = carl9170_op_start,
1715 .stop = carl9170_op_stop,
1716 .tx = carl9170_op_tx,
1717 .flush = carl9170_op_flush,
1718 .add_interface = carl9170_op_add_interface,
1719 .remove_interface = carl9170_op_remove_interface,
1720 .config = carl9170_op_config,
1721 .prepare_multicast = carl9170_op_prepare_multicast,
1722 .configure_filter = carl9170_op_configure_filter,
1723 .conf_tx = carl9170_op_conf_tx,
1724 .bss_info_changed = carl9170_op_bss_info_changed,
1725 .get_tsf = carl9170_op_get_tsf,
1726 .set_key = carl9170_op_set_key,
1727 .sta_add = carl9170_op_sta_add,
1728 .sta_remove = carl9170_op_sta_remove,
1729 .sta_notify = carl9170_op_sta_notify,
1730 .get_survey = carl9170_op_get_survey,
1731 .get_stats = carl9170_op_get_stats,
1732 .ampdu_action = carl9170_op_ampdu_action,
1733 .tx_frames_pending = carl9170_tx_frames_pending,
1734 };
1735
void *carl9170_alloc(size_t priv_size)
1737 {
1738 struct ieee80211_hw *hw;
1739 struct ar9170 *ar;
1740 struct sk_buff *skb;
1741 int i;
1742
1743 /*
1744 * this buffer is used for rx stream reconstruction.
1745 * Under heavy load this device (or the transport layer?)
1746 * tends to split the streams into separate rx descriptors.
1747 */
1748
1749 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1750 if (!skb)
1751 goto err_nomem;
1752
1753 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1754 if (!hw)
1755 goto err_nomem;
1756
1757 ar = hw->priv;
1758 ar->hw = hw;
1759 ar->rx_failover = skb;
1760
1761 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1762 ar->rx_has_plcp = false;
1763
1764 /*
1765 * Here's a hidden pitfall!
1766 *
1767 * All 4 AC queues work perfectly well under _legacy_ operation.
1768 * However as soon as aggregation is enabled, the traffic flow
1769 * gets very bumpy. Therefore we have to _switch_ to a
1770 * software AC with a single HW queue.
1771 */
1772 hw->queues = __AR9170_NUM_TXQ;
1773
1774 mutex_init(&ar->mutex);
1775 spin_lock_init(&ar->beacon_lock);
1776 spin_lock_init(&ar->cmd_lock);
1777 spin_lock_init(&ar->tx_stats_lock);
1778 spin_lock_init(&ar->tx_ampdu_list_lock);
1779 spin_lock_init(&ar->mem_lock);
1780 spin_lock_init(&ar->state_lock);
1781 atomic_set(&ar->pending_restarts, 0);
1782 ar->vifs = 0;
1783 for (i = 0; i < ar->hw->queues; i++) {
1784 skb_queue_head_init(&ar->tx_status[i]);
1785 skb_queue_head_init(&ar->tx_pending[i]);
1786
1787 INIT_LIST_HEAD(&ar->bar_list[i]);
1788 spin_lock_init(&ar->bar_list_lock[i]);
1789 }
1790 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1791 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1792 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1793 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1794 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1795 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1796 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1797 rcu_assign_pointer(ar->tx_ampdu_iter,
1798 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1799
1800 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1801 INIT_LIST_HEAD(&ar->vif_list);
1802 init_completion(&ar->tx_flush);
1803
1804 /* firmware decides which modes we support */
1805 hw->wiphy->interface_modes = 0;
1806
1807 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1808 ieee80211_hw_set(hw, MFP_CAPABLE);
1809 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1810 ieee80211_hw_set(hw, SUPPORTS_PS);
1811 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1812 ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1813 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1814 ieee80211_hw_set(hw, SIGNAL_DBM);
1815 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1816
1817 if (!modparam_noht) {
1818 /*
 * See the comment above for why we allow the user
 * to disable HT via a module parameter.
1821 */
1822 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1823 }
1824
1825 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1826 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1827 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1828
1829 hw->max_rates = CARL9170_TX_MAX_RATES;
1830 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1831
1832 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1833 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1834
1835 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1836
1837 return ar;
1838
1839 err_nomem:
1840 kfree_skb(skb);
1841 return ERR_PTR(-ENOMEM);
1842 }
1843
static int carl9170_read_eeprom(struct ar9170 *ar)
1845 {
1846 #define RW 8 /* number of words to read at once */
1847 #define RB (sizeof(u32) * RW)
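/*
 * The EEPROM image is fetched in bursts of RW words; offsets[] holds
 * the register addresses for one RREG round trip.
 */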
1848 u8 *eeprom = (void *)&ar->eeprom;
1849 __le32 offsets[RW];
1850 int i, j, err;
1851
1852 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1853
1854 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1855 #ifndef __CHECKER__
1856 /* don't want to handle trailing remains */
1857 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1858 #endif
1859
1860 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1861 for (j = 0; j < RW; j++)
1862 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1863 RB * i + 4 * j);
1864
1865 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1866 RB, (u8 *) &offsets,
1867 RB, eeprom + RB * i);
1868 if (err)
1869 return err;
1870 }
1871
1872 #undef RW
1873 #undef RB
1874 return 0;
1875 }
1876
static int carl9170_parse_eeprom(struct ar9170 *ar)
1878 {
1879 struct ath_regulatory *regulatory = &ar->common.regulatory;
1880 unsigned int rx_streams, tx_streams, tx_params = 0;
1881 int bands = 0;
1882 int chans = 0;
1883
1884 if (ar->eeprom.length == cpu_to_le16(0xffff))
1885 return -ENODATA;
1886
1887 rx_streams = hweight8(ar->eeprom.rx_mask);
1888 tx_streams = hweight8(ar->eeprom.tx_mask);
1889
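/*
 * With an asymmetric antenna/stream configuration the TX stream
 * count has to be advertised separately in the MCS tx_params field.
 */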
1890 if (rx_streams != tx_streams) {
1891 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1892
1893 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1894 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1895
1896 tx_params |= (tx_streams - 1) <<
1897 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1898
1899 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1900 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1901 }
1902
1903 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1904 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1905 &carl9170_band_2GHz;
1906 chans += carl9170_band_2GHz.n_channels;
1907 bands++;
1908 }
1909 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1910 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1911 &carl9170_band_5GHz;
1912 chans += carl9170_band_5GHz.n_channels;
1913 bands++;
1914 }
1915
1916 if (!bands)
1917 return -EINVAL;
1918
1919 ar->survey = devm_kcalloc(&ar->udev->dev, chans,
1920 sizeof(struct survey_info), GFP_KERNEL);
1921 if (!ar->survey)
1922 return -ENOMEM;
1923 ar->num_channels = chans;
1924
1925 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1926
1927 /* second part of wiphy init */
1928 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1929
1930 return 0;
1931 }
1932
static void carl9170_reg_notifier(struct wiphy *wiphy,
				  struct regulatory_request *request)
1935 {
1936 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1937 struct ar9170 *ar = hw->priv;
1938
1939 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1940 }
1941
int carl9170_register(struct ar9170 *ar)
1943 {
1944 struct ath_regulatory *regulatory = &ar->common.regulatory;
1945 int err = 0, i;
1946
1947 ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
1948 if (!ar->mem_bitmap)
1949 return -ENOMEM;
1950
1951 /* try to read EEPROM, init MAC addr */
1952 err = carl9170_read_eeprom(ar);
1953 if (err)
1954 return err;
1955
1956 err = carl9170_parse_eeprom(ar);
1957 if (err)
1958 return err;
1959
1960 err = ath_regd_init(regulatory, ar->hw->wiphy,
1961 carl9170_reg_notifier);
1962 if (err)
1963 return err;
1964
1965 if (modparam_noht) {
1966 carl9170_band_2GHz.ht_cap.ht_supported = false;
1967 carl9170_band_5GHz.ht_cap.ht_supported = false;
1968 }
1969
1970 for (i = 0; i < ar->fw.vif_num; i++) {
1971 ar->vif_priv[i].id = i;
1972 ar->vif_priv[i].vif = NULL;
1973 }
1974
1975 err = ieee80211_register_hw(ar->hw);
1976 if (err)
1977 return err;
1978
1979 /* mac80211 interface is now registered */
1980 ar->registered = true;
1981
1982 if (!ath_is_world_regd(regulatory))
1983 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1984
1985 #ifdef CONFIG_CARL9170_DEBUGFS
1986 carl9170_debugfs_register(ar);
1987 #endif /* CONFIG_CARL9170_DEBUGFS */
1988
1989 err = carl9170_led_init(ar);
1990 if (err)
1991 goto err_unreg;
1992
1993 #ifdef CONFIG_CARL9170_LEDS
1994 err = carl9170_led_register(ar);
1995 if (err)
1996 goto err_unreg;
1997 #endif /* CONFIG_CARL9170_LEDS */
1998
1999 #ifdef CONFIG_CARL9170_WPC
2000 err = carl9170_register_wps_button(ar);
2001 if (err)
2002 goto err_unreg;
2003 #endif /* CONFIG_CARL9170_WPC */
2004
2005 #ifdef CONFIG_CARL9170_HWRNG
2006 err = carl9170_register_hwrng(ar);
2007 if (err)
2008 goto err_unreg;
2009 #endif /* CONFIG_CARL9170_HWRNG */
2010
2011 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2012 wiphy_name(ar->hw->wiphy));
2013
2014 return 0;
2015
2016 err_unreg:
2017 carl9170_unregister(ar);
2018 return err;
2019 }
2020
void carl9170_unregister(struct ar9170 *ar)
2022 {
2023 if (!ar->registered)
2024 return;
2025
2026 ar->registered = false;
2027
2028 #ifdef CONFIG_CARL9170_LEDS
2029 carl9170_led_unregister(ar);
2030 #endif /* CONFIG_CARL9170_LEDS */
2031
2032 #ifdef CONFIG_CARL9170_DEBUGFS
2033 carl9170_debugfs_unregister(ar);
2034 #endif /* CONFIG_CARL9170_DEBUGFS */
2035
2036 carl9170_cancel_worker(ar);
2037 cancel_work_sync(&ar->restart_work);
2038
2039 ieee80211_unregister_hw(ar->hw);
2040 }
2041
void carl9170_free(struct ar9170 *ar)
2043 {
2044 WARN_ON(ar->registered);
2045 WARN_ON(IS_INITIALIZED(ar));
2046
2047 kfree_skb(ar->rx_failover);
2048 ar->rx_failover = NULL;
2049
2050 mutex_destroy(&ar->mutex);
2051
2052 ieee80211_free_hw(ar->hw);
2053 }
2054