/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"

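/*
 * Look up a connected station by MAC address. The station list is only
 * maintained in AP mode, so this returns NULL for any other network type.
 */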
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_sta *conn = NULL;
	u8 i, max_conn;

	max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;

	for (i = 0; i < max_conn; i++) {
		if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
			conn = &ar->sta_list[i];
			break;
		}
	}

	return conn;
}

struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
{
	struct ath6kl_sta *conn = NULL;
	u8 ctr;

	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
		if (ar->sta_list[ctr].aid == aid) {
			conn = &ar->sta_list[ctr];
			break;
		}
	}
	return conn;
}

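/*
 * Record a newly associated station. AIDs are 1-based, so the slot used
 * in sta_list is simply (aid - 1).
 */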
static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
			       u8 *wpaie, size_t ielen, u8 keymgmt,
			       u8 ucipher, u8 auth, u8 apsd_info)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_sta *sta;
	u8 free_slot;

	free_slot = aid - 1;

	sta = &ar->sta_list[free_slot];
	memcpy(sta->mac, mac, ETH_ALEN);
	if (ielen <= ATH6KL_MAX_IE)
		memcpy(sta->wpa_ie, wpaie, ielen);
	sta->aid = aid;
	sta->keymgmt = keymgmt;
	sta->ucipher = ucipher;
	sta->auth = auth;
	sta->apsd_info = apsd_info;

	ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
	ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
	aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn);
}

static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
	struct ath6kl_sta *sta = &ar->sta_list[i];
	struct ath6kl_mgmt_buff *entry, *tmp;

	/* empty the queued pkts in the PS queue if any */
	spin_lock_bh(&sta->psq_lock);
	skb_queue_purge(&sta->psq);
	skb_queue_purge(&sta->apsdq);

	if (sta->mgmt_psq_len != 0) {
		list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list)
			kfree(entry);
		INIT_LIST_HEAD(&sta->mgmt_psq);
		sta->mgmt_psq_len = 0;
	}

	spin_unlock_bh(&sta->psq_lock);

	memset(&ar->ap_stats.sta[sta->aid - 1], 0,
	       sizeof(struct wmi_per_sta_stat));
	memset(sta->mac, 0, ETH_ALEN);
	memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
	sta->aid = 0;
	sta->sta_flags = 0;

	ar->sta_list_index = ar->sta_list_index & ~(1 << i);
	aggr_reset_state(sta->aggr_conn);
}

static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
{
	u8 i, removed = 0;

	if (is_zero_ether_addr(mac))
		return removed;

	if (is_broadcast_ether_addr(mac)) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");

		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
			}
		}
	} else {
		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
				ath6kl_dbg(ATH6KL_DBG_TRC,
					   "deleting station %pM aid=%d reason=%d\n",
					   mac, ar->sta_list[i].aid, reason);
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
				break;
			}
		}
	}

	return removed;
}

enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
{
	struct ath6kl *ar = devt;

	return ar->ac2ep_map[ac];
}

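/*
 * Cookies are kept on a simple LIFO free list: ath6kl_alloc_cookie() pops
 * the head of ar->cookie_list and ath6kl_free_cookie() pushes a cookie back
 * onto it, with cookie_count tracking how many entries remain available.
 */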
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
{
	struct ath6kl_cookie *cookie;

	cookie = ar->cookie_list;
	if (cookie != NULL) {
		ar->cookie_list = cookie->arc_list_next;
		ar->cookie_count--;
	}

	return cookie;
}

void ath6kl_cookie_init(struct ath6kl *ar)
{
	u32 i;

	ar->cookie_list = NULL;
	ar->cookie_count = 0;

	memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));

	for (i = 0; i < MAX_COOKIE_NUM; i++)
		ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
}

void ath6kl_cookie_cleanup(struct ath6kl *ar)
{
	ar->cookie_list = NULL;
	ar->cookie_count = 0;
}

void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
{
	/* Insert first */

	if (!ar || !cookie)
		return;

	cookie->arc_list_next = ar->cookie_list;
	ar->cookie_list = cookie;
	ar->cookie_count++;
}

/*
 * Read from the hardware through its diagnostic window. No cooperation
 * from the firmware is required for this.
 */
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
{
	int ret;

	ret = ath6kl_hif_diag_read32(ar, address, value);
	if (ret) {
		ath6kl_warn("failed to read32 through diagnose window: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

/*
 * Write to the ATH6KL through its diagnostic window. No cooperation from
 * the Target is required for this.
 */
int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
{
	int ret;

	ret = ath6kl_hif_diag_write32(ar, address, value);
	if (ret) {
		ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n",
			   address, value);
		return ret;
	}

	return 0;
}

int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
{
	u32 count, *buf = data;
	int ret;

	if (WARN_ON(length % 4))
		return -EINVAL;

	for (count = 0; count < length / 4; count++, address += 4) {
		ret = ath6kl_diag_read32(ar, address, &buf[count]);
		if (ret)
			return ret;
	}

	return 0;
}

int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length)
{
	u32 count;
	__le32 *buf = data;
	int ret;

	if (WARN_ON(length % 4))
		return -EINVAL;

	for (count = 0; count < length / 4; count++, address += 4) {
		ret = ath6kl_diag_write32(ar, address, buf[count]);
		if (ret)
			return ret;
	}

	return 0;
}

int ath6kl_read_fwlogs(struct ath6kl *ar)
{
	struct ath6kl_dbglog_hdr debug_hdr;
	struct ath6kl_dbglog_buf debug_buf;
	u32 address, length, dropped, firstbuf, debug_hdr_addr;
	int ret, loop;
	u8 *buf;

	buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	address = TARG_VTOP(ar->target_type,
			    ath6kl_get_hi_item_addr(ar,
						    HI_ITEM(hi_dbglog_hdr)));

	ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr);
	if (ret)
		goto out;

	/* Get the contents of the ring buffer */
	if (debug_hdr_addr == 0) {
		ath6kl_warn("Invalid address for debug_hdr_addr\n");
		ret = -EINVAL;
		goto out;
	}

	address = TARG_VTOP(ar->target_type, debug_hdr_addr);
	ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
	if (ret)
		goto out;

	address = TARG_VTOP(ar->target_type,
			    le32_to_cpu(debug_hdr.dbuf_addr));
	firstbuf = address;
	dropped = le32_to_cpu(debug_hdr.dropped);
	ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
	if (ret)
		goto out;

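	/*
	 * Walk the circular list of debug buffers; bound the walk so a
	 * corrupted 'next' pointer cannot keep us in this loop forever.
	 */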
	loop = 100;

	do {
		address = TARG_VTOP(ar->target_type,
				    le32_to_cpu(debug_buf.buffer_addr));
		length = le32_to_cpu(debug_buf.length);

		if (length != 0 && (le32_to_cpu(debug_buf.length) <=
				    le32_to_cpu(debug_buf.bufsize))) {
			length = ALIGN(length, 4);

			ret = ath6kl_diag_read(ar, address,
					       buf, length);
			if (ret)
				goto out;

			ath6kl_debug_fwlog_event(ar, buf, length);
		}

		address = TARG_VTOP(ar->target_type,
				    le32_to_cpu(debug_buf.next));
		ret = ath6kl_diag_read(ar, address, &debug_buf,
				       sizeof(debug_buf));
		if (ret)
			goto out;

		loop--;

		if (WARN_ON(loop == 0)) {
			ret = -ETIMEDOUT;
			goto out;
		}
	} while (address != firstbuf);

out:
	kfree(buf);

	return ret;
}

/* FIXME: move to a better place, target.h? */
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000

void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
			 bool wait_fot_compltn, bool cold_reset)
{
	int status = 0;
	u32 address;
	__le32 data;

	if (target_type != TARGET_TYPE_AR6003 &&
	    target_type != TARGET_TYPE_AR6004)
		return;

	data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
			    cpu_to_le32(RESET_CONTROL_MBOX_RST);

	switch (target_type) {
	case TARGET_TYPE_AR6003:
		address = AR6003_RESET_CONTROL_ADDRESS;
		break;
	case TARGET_TYPE_AR6004:
		address = AR6004_RESET_CONTROL_ADDRESS;
		break;
	}

	status = ath6kl_diag_write32(ar, address, data);
	if (status)
		ath6kl_err("failed to reset target\n");
}

static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
	u8 index;
	u8 keyusage;

	for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) {
		if (vif->wep_key_list[index].key_len) {
			keyusage = GROUP_USAGE;
			if (index == vif->def_txkey_index)
				keyusage |= TX_USAGE;

			ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
					      index,
					      WEP_CRYPT,
					      keyusage,
					      vif->wep_key_list[index].key_len,
					      NULL, 0,
					      vif->wep_key_list[index].key,
					      KEY_OP_INIT_VAL, NULL,
					      NO_SYNC_WMIFLAG);
		}
	}
}

void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_req_key *ik;
	int res;
	u8 key_rsc[ATH6KL_KEY_SEQ_LEN];

	ik = &ar->ap_mode_bkey;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);

	switch (vif->auth_mode) {
	case NONE_AUTH:
		if (vif->prwise_crypto == WEP_CRYPT)
			ath6kl_install_static_wep_keys(vif);
		if (!ik->valid || ik->key_type != WAPI_CRYPT)
			break;
		/* for WAPI, we need to set the delayed group key, continue: */
	case WPA_PSK_AUTH:
	case WPA2_PSK_AUTH:
	case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
		if (!ik->valid)
			break;

		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "Delayed addkey for the initial group key for AP mode\n");
		memset(key_rsc, 0, sizeof(key_rsc));
		res = ath6kl_wmi_addkey_cmd(
			ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
			GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
			ik->key,
			KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
		if (res) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "Delayed addkey failed: %d\n", res);
		}
		break;
	}

	ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
	set_bit(CONNECTED, &vif->flags);
	netif_carrier_on(vif->ndev);
}

void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
				u8 keymgmt, u8 ucipher, u8 auth,
				u8 assoc_req_len, u8 *assoc_info, u8 apsd_info)
{
	u8 *ies = NULL, *wpa_ie = NULL, *pos;
	size_t ies_len = 0;
	struct station_info sinfo;

	ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);

	if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
		struct ieee80211_mgmt *mgmt =
			(struct ieee80211_mgmt *) assoc_info;
		if (ieee80211_is_assoc_req(mgmt->frame_control) &&
		    assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) +
		    sizeof(mgmt->u.assoc_req)) {
			ies = mgmt->u.assoc_req.variable;
			ies_len = assoc_info + assoc_req_len - ies;
		} else if (ieee80211_is_reassoc_req(mgmt->frame_control) &&
			   assoc_req_len >= sizeof(struct ieee80211_hdr_3addr)
			   + sizeof(mgmt->u.reassoc_req)) {
			ies = mgmt->u.reassoc_req.variable;
			ies_len = assoc_info + assoc_req_len - ies;
		}
	}

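	/*
	 * Look through the (Re)Association Request IEs for the security IE
	 * to hand to ath6kl_add_new_sta(): an RSN IE, a vendor-specific WPA
	 * IE, a WPS IE (which overrides WPA/RSN), or a WAPI IE if nothing
	 * else was found.
	 */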
	pos = ies;
	while (pos && pos + 1 < ies + ies_len) {
		if (pos + 2 + pos[1] > ies + ies_len)
			break;
		if (pos[0] == WLAN_EID_RSN)
			wpa_ie = pos; /* RSN IE */
		else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
			 pos[1] >= 4 &&
			 pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) {
			if (pos[5] == 0x01)
				wpa_ie = pos; /* WPA IE */
			else if (pos[5] == 0x04) {
				wpa_ie = pos; /* WPS IE */
				break; /* overrides WPA/RSN IE */
			}
		} else if (pos[0] == 0x44 && wpa_ie == NULL) {
			/*
			 * Note: WAPI Parameter Set IE re-uses Element ID that
			 * was officially allocated for BSS AC Access Delay. As
			 * such, we need to be a bit more careful on when
			 * parsing the frame. However, BSS AC Access Delay
			 * element is not supposed to be included in
			 * (Re)Association Request frames, so this should not
			 * cause problems.
			 */
			wpa_ie = pos; /* WAPI IE */
			break;
		}
		pos += 2 + pos[1];
	}

	ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie,
			   wpa_ie ? 2 + wpa_ie[1] : 0,
			   keymgmt, ucipher, auth, apsd_info);

	/* send event to application */
	memset(&sinfo, 0, sizeof(sinfo));

	/* TODO: sinfo.generation */

	sinfo.assoc_req_ies = ies;
	sinfo.assoc_req_ies_len = ies_len;
	sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;

	cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);

	netif_wake_queue(vif->ndev);
}

void disconnect_timer_handler(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *)ptr;
	struct ath6kl_vif *vif = netdev_priv(dev);

	ath6kl_init_profile_info(vif);
	ath6kl_disconnect(vif);
}

void ath6kl_disconnect(struct ath6kl_vif *vif)
{
	if (test_bit(CONNECTED, &vif->flags) ||
	    test_bit(CONNECT_PEND, &vif->flags)) {
		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
		/*
		 * Disconnect command is issued, clear the connect pending
		 * flag. The connected flag will be cleared in
		 * disconnect event notification.
		 */
		clear_bit(CONNECT_PEND, &vif->flags);
	}
}

/* WMI Event handlers */

void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{
	struct ath6kl *ar = devt;

	memcpy(ar->mac_addr, datap, ETH_ALEN);
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
		   __func__, ar->mac_addr);

	ar->version.wlan_ver = sw_ver;
	ar->version.abi_ver = abi_ver;

	snprintf(ar->wiphy->fw_version,
		 sizeof(ar->wiphy->fw_version),
		 "%u.%u.%u.%u",
		 (ar->version.wlan_ver & 0xf0000000) >> 28,
		 (ar->version.wlan_ver & 0x0f000000) >> 24,
		 (ar->version.wlan_ver & 0x00ff0000) >> 16,
		 (ar->version.wlan_ver & 0x0000ffff));

	/* indicate to the waiting thread that the ready event was received */
	set_bit(WMI_READY, &ar->flag);
	wake_up(&ar->event_wq);
}

void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{
	struct ath6kl *ar = vif->ar;
	bool aborted = false;

	if (status != WMI_SCAN_STATUS_SUCCESS)
		aborted = true;

	ath6kl_cfg80211_scan_complete_event(vif, aborted);

	if (!ar->usr_bss_filter) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					 NONE_BSS_FILTER, 0);
	}

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}

void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
			  u16 listen_int, u16 beacon_int,
			  enum network_type net_type, u8 beacon_ie_len,
			  u8 assoc_req_len, u8 assoc_resp_len,
			  u8 *assoc_info)
{
	struct ath6kl *ar = vif->ar;

	ath6kl_cfg80211_connect_event(vif, channel, bssid,
				      listen_int, beacon_int,
				      net_type, beacon_ie_len,
				      assoc_req_len, assoc_resp_len,
				      assoc_info);

	memcpy(vif->bssid, bssid, sizeof(vif->bssid));
	vif->bss_ch = channel;

	if (vif->nw_type == INFRA_NETWORK)
		ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
					      vif->listen_intvl_t, 0);

	netif_wake_queue(vif->ndev);

	/* Update connect & link status atomically */
	spin_lock_bh(&vif->if_lock);
	set_bit(CONNECTED, &vif->flags);
	clear_bit(CONNECT_PEND, &vif->flags);
	netif_carrier_on(vif->ndev);
	spin_unlock_bh(&vif->if_lock);

	aggr_reset_state(vif->aggr_cntxt->aggr_conn);
	vif->reconnect_flag = 0;

	if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
		memset(ar->node_map, 0, sizeof(ar->node_map));
		ar->node_num = 0;
		ar->next_ep_id = ENDPOINT_2;
	}

	if (!ar->usr_bss_filter) {
		set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					 CURRENT_BSS_FILTER, 0);
	}
}

void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{
	struct ath6kl_sta *sta;
	struct ath6kl *ar = vif->ar;
	u8 tsc[6];

	/*
	 * For AP case, keyid will have aid of STA which sent pkt with
	 * MIC error. Use this aid to get MAC & send it to hostapd.
	 */
	if (vif->nw_type == AP_NETWORK) {
		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
		if (!sta)
			return;

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "ap tkip mic error received from aid=%d\n", keyid);

		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
		cfg80211_michael_mic_failure(vif->ndev, sta->mac,
					     NL80211_KEYTYPE_PAIRWISE, keyid,
					     tsc, GFP_KERNEL);
	} else {
		ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
	}
}

static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
	struct wmi_target_stats *tgt_stats =
		(struct wmi_target_stats *) ptr;
	struct ath6kl *ar = vif->ar;
	struct target_stats *stats = &vif->target_stats;
	struct tkip_ccmp_stats *ccmp_stats;
	u8 ac;

	if (len < sizeof(*tgt_stats))
		return;

	ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");

	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
	stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
	stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
	stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
	stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
	stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
	stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
	stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
	stats->tx_rts_success_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);

	for (ac = 0; ac < WMM_NUM_AC; ac++)
		stats->tx_pkt_per_ac[ac] +=
			le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);

	stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
	stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
	stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
	stats->tx_mult_retry_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
	stats->tx_rts_fail_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
	stats->tx_ucast_rate =
		ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));

	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
	stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
	stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
	stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
	stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
	stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
	stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
	stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
	stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
	stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
	stats->rx_key_cache_miss +=
		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
	stats->rx_ucast_rate =
		ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));

	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;

	stats->tkip_local_mic_fail +=
		le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
	stats->tkip_cnter_measures_invoked +=
		le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
	stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);

	stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
	stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);

	stats->pwr_save_fail_cnt +=
		le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
	stats->noise_floor_calib =
		a_sle32_to_cpu(tgt_stats->noise_floor_calib);

	stats->cs_bmiss_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
	stats->cs_low_rssi_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
	stats->cs_connect_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
	stats->cs_discon_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);

	stats->cs_ave_beacon_rssi =
		a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);

	stats->cs_last_roam_msec =
		tgt_stats->cserv_stats.cs_last_roam_msec;
	stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
	stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);

	stats->lq_val = le32_to_cpu(tgt_stats->lq_val);

	stats->wow_pkt_dropped +=
		le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
	stats->wow_host_pkt_wakeups +=
		tgt_stats->wow_stats.wow_host_pkt_wakeups;
	stats->wow_host_evt_wakeups +=
		tgt_stats->wow_stats.wow_host_evt_wakeups;
	stats->wow_evt_discarded +=
		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);

	if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
		clear_bit(STATS_UPDATE_PEND, &vif->flags);
		wake_up(&ar->event_wq);
	}
}

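/*
 * Accumulate one little-endian counter into another without losing the
 * on-the-wire byte order of the stored value.
 */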
static void ath6kl_add_le32(__le32 *var, __le32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}

void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
	struct ath6kl *ar = vif->ar;
	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
	struct wmi_per_sta_stat *st_ap, *st_p;
	u8 ac;

	if (vif->nw_type == AP_NETWORK) {
		if (len < sizeof(*p))
			return;

		for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
			st_ap = &ap->sta[ac];
			st_p = &p->sta[ac];

			ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
			ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
			ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
			ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
			ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
			ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
			ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
			ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
		}
	} else {
		ath6kl_update_target_stats(vif, ptr, len);
	}
}

void ath6kl_wakeup_event(void *dev)
{
	struct ath6kl *ar = (struct ath6kl *) dev;

	wake_up(&ar->event_wq);
}

void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
{
	struct ath6kl *ar = (struct ath6kl *) devt;

	ar->tx_pwr = tx_pwr;
	wake_up(&ar->event_wq);
}

void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{
	struct ath6kl_sta *conn;
	struct sk_buff *skb;
	bool psq_empty = false;
	struct ath6kl *ar = vif->ar;
	struct ath6kl_mgmt_buff *mgmt_buf;

	conn = ath6kl_find_sta_by_aid(ar, aid);
	if (!conn)
		return;

	/*
	 * Send out a packet queued on ps queue. When the ps queue
	 * becomes empty update the PVB for this station.
	 */
	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		/* TODO: Send out a NULL data frame */
		return;

	spin_lock_bh(&conn->psq_lock);
	if (conn->mgmt_psq_len > 0) {
		mgmt_buf = list_first_entry(&conn->mgmt_psq,
					    struct ath6kl_mgmt_buff, list);
		list_del(&mgmt_buf->list);
		conn->mgmt_psq_len--;
		spin_unlock_bh(&conn->psq_lock);

		conn->sta_flags |= STA_PS_POLLED;
		ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
					 mgmt_buf->id, mgmt_buf->freq,
					 mgmt_buf->wait, mgmt_buf->buf,
					 mgmt_buf->len, mgmt_buf->no_cck);
		conn->sta_flags &= ~STA_PS_POLLED;
		kfree(mgmt_buf);
	} else {
		skb = skb_dequeue(&conn->psq);
		spin_unlock_bh(&conn->psq_lock);

		conn->sta_flags |= STA_PS_POLLED;
		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~STA_PS_POLLED;
	}

	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
}

void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{
	bool mcastq_empty = false;
	struct sk_buff *skb;
	struct ath6kl *ar = vif->ar;

	/*
	 * If there are no associated STAs, ignore the DTIM expiry event.
	 * There can be potential race conditions where the last associated
	 * STA may disconnect & before the host could clear the 'Indicate
	 * DTIM' request to the firmware, the firmware would have just
	 * indicated a DTIM expiry event. The race is between 'clear DTIM
	 * expiry cmd' going from the host to the firmware & the DTIM
	 * expiry event happening from the firmware to the host.
	 */
	if (!ar->sta_list_index)
		return;

	spin_lock_bh(&ar->mcastpsq_lock);
	mcastq_empty = skb_queue_empty(&ar->mcastpsq);
	spin_unlock_bh(&ar->mcastpsq_lock);

	if (mcastq_empty)
		return;

	/* set the STA flag to dtim_expired for the frame to go out */
	set_bit(DTIM_EXPIRED, &vif->flags);

	spin_lock_bh(&ar->mcastpsq_lock);
	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
		spin_unlock_bh(&ar->mcastpsq_lock);

		ath6kl_data_tx(skb, vif->ndev);

		spin_lock_bh(&ar->mcastpsq_lock);
	}
	spin_unlock_bh(&ar->mcastpsq_lock);

	clear_bit(DTIM_EXPIRED, &vif->flags);

	/* clear the LSB of the BitMapCtl field of the TIM IE */
	ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
}

void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
			     u8 assoc_resp_len, u8 *assoc_info,
			     u16 prot_reason_status)
{
	struct ath6kl *ar = vif->ar;

	if (vif->nw_type == AP_NETWORK) {
		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
			return;

		/* if no more associated STAs, empty the mcast PS q */
		if (ar->sta_list_index == 0) {
			spin_lock_bh(&ar->mcastpsq_lock);
			skb_queue_purge(&ar->mcastpsq);
			spin_unlock_bh(&ar->mcastpsq_lock);

			/* clear the LSB of the TIM IE's BitMapCtl field */
			if (test_bit(WMI_READY, &ar->flag))
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       MCAST_AID, 0);
		}

		if (!is_broadcast_ether_addr(bssid)) {
			/* send event to application */
			cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
		}

		if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
			memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
			clear_bit(CONNECTED, &vif->flags);
		}

		return;
	}

	ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
					 assoc_resp_len, assoc_info,
					 prot_reason_status);

	aggr_reset_state(vif->aggr_cntxt->aggr_conn);

	del_timer(&vif->disconnect_timer);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);

	/*
	 * If the event is due to a disconnect cmd from the host, only then
	 * will the target stop trying to connect. Under any other
	 * condition, the target keeps trying to connect.
	 */
	if (reason == DISCONNECT_CMD) {
		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
			ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
						 NONE_BSS_FILTER, 0);
	} else {
		set_bit(CONNECT_PEND, &vif->flags);
		if (((reason == ASSOC_FAILED) &&
		     (prot_reason_status == 0x11)) ||
		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
		     (vif->reconnect_flag == 1))) {
			set_bit(CONNECTED, &vif->flags);
			return;
		}
	}

	/* update connect & link status atomically */
	spin_lock_bh(&vif->if_lock);
	clear_bit(CONNECTED, &vif->flags);
	netif_carrier_off(vif->ndev);
	spin_unlock_bh(&vif->if_lock);

	if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
		vif->reconnect_flag = 0;

	if (reason != CSERV_DISCONNECT)
		ar->user_key_ctrl = 0;

	netif_stop_queue(vif->ndev);
	memset(vif->bssid, 0, sizeof(vif->bssid));
	vif->bss_ch = 0;

	ath6kl_tx_data_cleanup(ar);
}

struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;

	spin_lock_bh(&ar->list_lock);
	if (list_empty(&ar->vif_list)) {
		spin_unlock_bh(&ar->list_lock);
		return NULL;
	}

	vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);

	spin_unlock_bh(&ar->list_lock);

	return vif;
}

static int ath6kl_open(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	set_bit(WLAN_ENABLED, &vif->flags);

	if (test_bit(CONNECTED, &vif->flags)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	} else {
		netif_carrier_off(dev);
	}

	return 0;
}

static int ath6kl_close(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	netif_stop_queue(dev);

	ath6kl_cfg80211_stop(vif);

	clear_bit(WLAN_ENABLED, &vif->flags);

	return 0;
}

static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	return &vif->net_stats;
}

static int ath6kl_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct ath6kl_vif *vif = netdev_priv(dev);
	struct ath6kl *ar = vif->ar;
	int err = 0;

	if ((features & NETIF_F_RXCSUM) &&
	    (ar->rx_meta_ver != WMI_META_VERSION_2)) {
		ar->rx_meta_ver = WMI_META_VERSION_2;
		err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
							 vif->fw_vif_idx,
							 ar->rx_meta_ver, 0, 0);
		if (err) {
			dev->features = features & ~NETIF_F_RXCSUM;
			return err;
		}
	} else if (!(features & NETIF_F_RXCSUM) &&
		   (ar->rx_meta_ver == WMI_META_VERSION_2)) {
		ar->rx_meta_ver = 0;
		err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
							 vif->fw_vif_idx,
							 ar->rx_meta_ver, 0, 0);
		if (err) {
			dev->features = features | NETIF_F_RXCSUM;
			return err;
		}
	}

	return err;
}

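/*
 * Sync the driver's multicast filter list with the netdev's: promiscuous,
 * all-multi or an oversized list falls back to enabling all multicast in
 * firmware; otherwise stale filters are removed first and any new addresses
 * are then programmed one by one.
 */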
static void ath6kl_set_multicast_list(struct net_device *ndev)
{
	struct ath6kl_vif *vif = netdev_priv(ndev);
	bool mc_all_on = false, mc_all_off = false;
	int mc_count = netdev_mc_count(ndev);
	struct netdev_hw_addr *ha;
	bool found;
	struct ath6kl_mc_filter *mc_filter, *tmp;
	struct list_head mc_filter_new;
	int ret;

	if (!test_bit(WMI_READY, &vif->ar->flag) ||
	    !test_bit(WLAN_ENABLED, &vif->flags))
		return;

	mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
		    !!(ndev->flags & IFF_ALLMULTI) ||
		    !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);

	mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0;

	if (mc_all_on || mc_all_off) {
		/* Enable/disable all multicast */
		ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
			   mc_all_on ? "enabling" : "disabling");
		ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
						  mc_all_on);
		if (ret)
			ath6kl_warn("Failed to %s multicast receive\n",
				    mc_all_on ? "enable" : "disable");
		return;
	}

	list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
		found = false;
		netdev_for_each_mc_addr(ha, ndev) {
			if (memcmp(ha->addr, mc_filter->hw_addr,
				   ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
				found = true;
				break;
			}
		}

		if (!found) {
			/*
			 * Delete the filter which was previously set
			 * but not in the new request.
			 */
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "Removing %pM from multicast filter\n",
				   mc_filter->hw_addr);
			ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
					vif->fw_vif_idx, mc_filter->hw_addr,
					false);
			if (ret) {
				ath6kl_warn("Failed to remove multicast filter:%pM\n",
					    mc_filter->hw_addr);
				return;
			}

			list_del(&mc_filter->list);
			kfree(mc_filter);
		}
	}

	INIT_LIST_HEAD(&mc_filter_new);

	netdev_for_each_mc_addr(ha, ndev) {
		found = false;
		list_for_each_entry(mc_filter, &vif->mc_filter, list) {
			if (memcmp(ha->addr, mc_filter->hw_addr,
				   ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
				found = true;
				break;
			}
		}

		if (!found) {
			mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter),
					    GFP_ATOMIC);
			if (!mc_filter) {
				WARN_ON(1);
				goto out;
			}

			memcpy(mc_filter->hw_addr, ha->addr,
			       ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
			/* Set the multicast filter */
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "Adding %pM to multicast filter list\n",
				   mc_filter->hw_addr);
			ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
					vif->fw_vif_idx, mc_filter->hw_addr,
					true);
			if (ret) {
				ath6kl_warn("Failed to add multicast filter :%pM\n",
					    mc_filter->hw_addr);
				kfree(mc_filter);
				goto out;
			}

			list_add_tail(&mc_filter->list, &mc_filter_new);
		}
	}

out:
	list_splice_tail(&mc_filter_new, &vif->mc_filter);
}

static const struct net_device_ops ath6kl_netdev_ops = {
	.ndo_open = ath6kl_open,
	.ndo_stop = ath6kl_close,
	.ndo_start_xmit = ath6kl_data_tx,
	.ndo_get_stats = ath6kl_get_stats,
	.ndo_set_features = ath6kl_set_features,
	.ndo_set_rx_mode = ath6kl_set_multicast_list,
};

void init_netdev(struct net_device *dev)
{
	dev->netdev_ops = &ath6kl_netdev_ops;
	dev->destructor = free_netdev;
	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;

	dev->needed_headroom = ETH_HLEN;
	dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
				sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
				+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;

	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
}