// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020, 2022 Intel Corporation
 */
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

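/*
 * Ask mac80211 to tear down every TDLS link, on any vif: walk the
 * firmware station table and request a teardown for each TDLS peer
 * found. Caller must hold mvm->mutex.
 */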
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}

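/*
 * Count connected TDLS peers; if @vif is non-NULL, only peers belonging
 * to that vif are counted. Caller must hold mvm->mutex.
 */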
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

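/*
 * Send TDLS_CONFIG_CMD to the firmware, carrying the MAC id/color of
 * @vif and the list of current TDLS peers. The response is requested
 * (CMD_WANT_SKB) only so its length can be sanity-checked; its contents
 * are ignored for now.
 */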
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}

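/*
 * Recalculate TDLS-related state after a peer was added or removed.
 * The power state is updated before the FW config when the first peer
 * joins, and after it when the last peer leaves; the peer list is
 * pushed to the FW only if it supports TDLS channel switching.
 */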
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/*
	 * Configure the FW with TDLS peer info only if TDLS channel switch
	 * capability is set.
	 * TDLS config data is currently used only by the TDLS channel switch
	 * code. It is also supposed to serve TDLS buffer stations, which are
	 * not yet implemented in the FW.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
		iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

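/*
 * mac80211 callback: protect the session for two DTIM intervals so the
 * station stays on the base channel long enough to hear the TDLS setup
 * response from the peer.
 */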
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/* Protect the session to hear the TDLS setup response on the channel */
	mutex_lock(&mvm->mutex);
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		iwl_mvm_schedule_session_protection(mvm, vif, duration,
						    duration, true);
	else
		iwl_mvm_protect_session(mvm, vif, duration,
					duration, 100, true);
	mutex_unlock(&mvm->mutex);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

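/*
 * Move the TDLS channel-switch state machine to @state, logging the
 * transition. Entering REQ_SENT records the request timestamp; going
 * back to IDLE forgets the current switching peer.
 */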
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

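/*
 * Handle the TDLS channel-switch notification from the firmware. A zero
 * status means the switch failed, so go back to IDLE; otherwise the
 * switch is ACTIVE and the delayed work is re-armed one DTIM period
 * ahead to switch again (or time out).
 */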
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

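/*
 * Validate a requested channel-switch action against the current state
 * of the state machine and the peer it refers to. Returns 0 if the
 * action is allowed, -EBUSY or -EINVAL otherwise.
 */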
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

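/*
 * Build and send TDLS_CHANNEL_SWITCH_CMD to the firmware: validate the
 * action, resolve the target chandef, embed the channel-switch template
 * frame (with its Tx command) and the switch timing, then advance the
 * state machine. On any failure the state machine is reset to IDLE.
 */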
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	struct iwl_tdls_channel_switch_cmd_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	tail->timing.frame_timestamp = cpu_to_le32(timestamp);
	tail->timing.switch_time = cpu_to_le32(switch_time);
	tail->timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
				rcu_dereference(vif->bss_conf.chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef)
		iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	tail->timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
			   mvmsta->deflink.sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(tail->frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

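/*
 * Delayed work run after an active channel switch has finished or timed
 * out: return to IDLE and, if a channel-switch peer is still
 * registered, send a new switch request to it and re-arm the work one
 * DTIM period later.
 */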
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

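/*
 * mac80211 callback: start a TDLS channel switch with @sta. Only a
 * single switching peer is supported; the template frame and switch
 * parameters are cached so the delayed work can re-send the request
 * until the switch is canceled.
 */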
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

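/*
 * mac80211 callback: cancel an ongoing TDLS channel switch with @sta.
 * Drops the cached peer state and, if a switch with that peer is still
 * in flight, waits one DTIM interval outside the mutex so the PHY is
 * back on the base channel before flushing the work item.
 */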
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

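/*
 * mac80211 callback: handle a channel-switch request/response frame
 * received from a TDLS peer. A failed response from the peer we were
 * switching to resets the state machine; otherwise the corresponding
 * switch command is sent. In both cases a timeout of one DTIM period
 * is armed in case the switch does not complete.
 */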
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}