Lines matching refs:mvm in drivers/net/wireless/intel/iwlwifi/mvm/tx.c

19 iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,  in iwl_mvm_bar_check_trigger()  argument
25 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); in iwl_mvm_bar_check_trigger()
34 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_bar_check_trigger()
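
The pair of fwrt calls on lines 25 and 34 is the usual firmware debug-trigger idiom: check whether a trigger is armed with iwl_fw_dbg_trigger_on(), then fire a collection with iwl_fw_dbg_collect_trig(). A sketch of how iwl_mvm_bar_check_trigger() likely ties them together; the tid/ssn parameters and the ba_trig->tx_bar TID filter are reconstructed from memory of the source, not shown in this listing:

static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
				      u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	/* nothing to do unless a BA trigger is armed */
	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	/* only collect if the trigger asked for BARs on this TID */
	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}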
42 static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_csum_pre_bz() argument
56 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || in iwl_mvm_tx_csum_pre_bz()
125 if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && in iwl_mvm_tx_csum_pre_bz()
143 u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) in iwl_mvm_tx_csum_bz() argument
182 static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_csum() argument
186 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) in iwl_mvm_tx_csum()
187 return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); in iwl_mvm_tx_csum()
188 return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); in iwl_mvm_tx_csum()
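
Lines 186-188 show the complete body of iwl_mvm_tx_csum(): a device-family dispatch between the pre-Bz and Bz checksum-offload encodings. Reassembled from the lines above (the ieee80211_tx_info parameter is inferred from the call on line 187):

static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info, bool amsdu)
{
	/* pre-Bz hardware uses the legacy offload_assist layout */
	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
		return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu);
	return iwl_mvm_tx_csum_bz(mvm, skb, amsdu);
}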
194 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_set_tx_cmd() argument
233 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, in iwl_mvm_set_tx_cmd()
253 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << in iwl_mvm_set_tx_cmd()
274 if (ieee80211_is_data(fc) && len > mvm->rts_threshold && in iwl_mvm_set_tx_cmd()
278 if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_set_tx_cmd()
290 cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); in iwl_mvm_set_tx_cmd()
293 static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, in iwl_mvm_get_tx_ant() argument
298 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) in iwl_mvm_get_tx_ant()
299 return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
307 return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
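
iwl_mvm_get_tx_ant() picks the antenna bits of rate_n_flags: on 2.4 GHz with BT coex claiming the shared antenna it forces the non-shared one (lines 298-299), otherwise it falls back to the round-robin management antenna (line 307). A sketch with the middle branch (per-station antenna toggling for data frames) filled in from memory, so treat that branch as an assumption:

static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	/* BT coex may own the shared antenna on 2.4 GHz */
	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

	/* data frames toggle the antenna per station */
	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
	}

	/* management frames rotate through mgmt_last_antenna_idx */
	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}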
310 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, in iwl_mvm_get_tx_rate() argument
320 if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { in iwl_mvm_get_tx_rate()
336 &mvm->nvm_data->bands[info->band], sta); in iwl_mvm_get_tx_rate()
349 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); in iwl_mvm_get_tx_rate()
353 if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { in iwl_mvm_get_tx_rate()
365 static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, in iwl_mvm_get_tx_rate_n_flags() argument
369 return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | in iwl_mvm_get_tx_rate_n_flags()
370 iwl_mvm_get_tx_ant(mvm, info, sta, fc); in iwl_mvm_get_tx_rate_n_flags()
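
Lines 369-370 are the whole of iwl_mvm_get_tx_rate_n_flags(): rate and antenna live in disjoint bitfields of the same rate_n_flags word, so the two helpers can simply be OR'd together. Reassembled, with the parameter list inferred from the two calls:

static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
	       iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}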
376 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, in iwl_mvm_set_tx_cmd_rate() argument
414 cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); in iwl_mvm_set_tx_cmd_rate()
437 static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, in iwl_mvm_set_tx_cmd_crypto() argument
495 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_set_tx_params() argument
503 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); in iwl_mvm_set_tx_params()
510 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_set_tx_params()
535 iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, in iwl_mvm_set_tx_params()
539 if (mvm->trans->trans_cfg->device_family >= in iwl_mvm_set_tx_params()
542 u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, in iwl_mvm_set_tx_params()
557 u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, in iwl_mvm_set_tx_params()
578 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); in iwl_mvm_set_tx_params()
580 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); in iwl_mvm_set_tx_params()
582 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); in iwl_mvm_set_tx_params()
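
iwl_mvm_set_tx_params() is the command builder: it allocates a dev_cmd (line 503), and on new-TX-API devices fills an iwl_tx_cmd_gen3 (AX210 and later, u32 offload_assist, lines 539-542) or iwl_tx_cmd_gen2 (u16 offload_assist, line 557) directly. Older devices fall through to the legacy helper trio on lines 578-582; a sketch of that tail, with the hw_key guard recalled from memory rather than shown above:

	/* legacy (pre-22000) path: fill the v6 TX command in place */
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);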
602 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, in iwl_mvm_get_ctrl_vif_queue() argument
626 return mvm->probe_queue; in iwl_mvm_get_ctrl_vif_queue()
634 return mvm->probe_queue; in iwl_mvm_get_ctrl_vif_queue()
637 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
640 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
647 static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, in iwl_mvm_probe_resp_set_noa() argument
679 IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); in iwl_mvm_probe_resp_set_noa()
685 IWL_ERR(mvm, in iwl_mvm_probe_resp_set_noa()
708 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) in iwl_mvm_tx_skb_non_sta() argument
743 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); in iwl_mvm_tx_skb_non_sta()
745 queue = mvm->snif_queue; in iwl_mvm_tx_skb_non_sta()
746 sta_id = mvm->snif_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
757 sta_id = mvm->aux_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
758 queue = mvm->aux_queue; in iwl_mvm_tx_skb_non_sta()
763 IWL_ERR(mvm, "No queue was found. Dropping TX\n"); in iwl_mvm_tx_skb_non_sta()
768 iwl_mvm_probe_resp_set_noa(mvm, skb); in iwl_mvm_tx_skb_non_sta()
770 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); in iwl_mvm_tx_skb_non_sta()
772 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); in iwl_mvm_tx_skb_non_sta()
779 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { in iwl_mvm_tx_skb_non_sta()
780 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_skb_non_sta()
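
The tail of iwl_mvm_tx_skb_non_sta() (lines 772-780) shows the ownership rule for TX commands: once iwl_trans_tx() accepts the skb the transport owns the dev_cmd, but on failure the caller must free it. A sketch, with the iwl_mvm_skb_prepare_status() step recalled from memory:

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* from here on, info->control must not be touched */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;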
787 unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, in iwl_mvm_max_amsdu_size() argument
794 int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); in iwl_mvm_max_amsdu_size()
797 if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_max_amsdu_size()
800 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); in iwl_mvm_max_amsdu_size()
809 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); in iwl_mvm_max_amsdu_size()
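
iwl_mvm_max_amsdu_size() caps the A-MSDU so it fits the per-AC TX FIFO: HE stations are redirected to the trigger-based fifos (line 797, followed by an ac += 4 step in the source), then the fifo size for the right LMAC is read from the shared-memory config (line 809). The 256-byte margin leaves room for the TX command and headers so the start of the next frame still fits in the fifo. A sketch of the final clamp, assuming mvmsta is the usual iwl_mvm_sta_from_mac80211(sta):

	/* never build an A-MSDU longer than the TX FIFO can hold */
	return min_t(unsigned int, mvmsta->max_amsdu_len,
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);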
880 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_tso() argument
930 iwl_mvm_max_amsdu_size(mvm, sta, tid)); in iwl_mvm_tx_tso()
965 mvm->trans->max_skb_frags) in iwl_mvm_tx_tso()
985 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_tso() argument
998 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) in iwl_mvm_txq_should_update() argument
1000 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; in iwl_mvm_txq_should_update()
1004 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_txq_should_update()
1008 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + in iwl_mvm_txq_should_update()
1016 static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, in iwl_mvm_tx_airtime() argument
1026 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_airtime()
1028 if (mvm->tcm.paused) in iwl_mvm_tx_airtime()
1031 if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) in iwl_mvm_tx_airtime()
1032 schedule_delayed_work(&mvm->tcm.work, 0); in iwl_mvm_tx_airtime()
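
iwl_mvm_tx_airtime() feeds the traffic-load (TCM) accounting: it resolves the per-MAC bucket (line 1026), skips accounting while TCM is paused (line 1028), and kicks the TCM worker when a full period has elapsed (lines 1031-1032). A reconstruction from these lines; the mac_id_n_color masking and the final airtime accumulation are from memory:

static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvmsta,
			       int airtime)
{
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata;

	if (mac >= NUM_MAC_INDEX_DRIVER)
		return;

	mdata = &mvm->tcm.data[mac];

	if (mvm->tcm.paused)
		return;

	/* run the TCM evaluation if a full period has passed */
	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);

	mdata->tx.airtime += airtime;
}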
1037 static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, in iwl_mvm_tx_pkt_queued() argument
1047 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_pkt_queued()
1059 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_mpdu() argument
1090 iwl_mvm_probe_resp_set_noa(mvm, skb); in iwl_mvm_tx_mpdu()
1092 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, in iwl_mvm_tx_mpdu()
1125 if (!iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_tx_mpdu()
1142 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1147 if (!iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_tx_mpdu()
1149 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; in iwl_mvm_tx_mpdu()
1161 if (unlikely(mvm->queue_info[txq_id].status == in iwl_mvm_tx_mpdu()
1163 iwl_mvm_txq_should_update(mvm, txq_id))) in iwl_mvm_tx_mpdu()
1164 schedule_work(&mvm->add_stream_wk); in iwl_mvm_tx_mpdu()
1167 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", in iwl_mvm_tx_mpdu()
1175 iwl_mvm_mei_tx_copy_to_csme(mvm, skb, in iwl_mvm_tx_mpdu()
1179 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) in iwl_mvm_tx_mpdu()
1187 if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, in iwl_mvm_tx_mpdu()
1194 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1197 IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); in iwl_mvm_tx_mpdu()
1201 int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, in iwl_mvm_tx_skb_sta() argument
1219 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); in iwl_mvm_tx_skb_sta()
1225 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); in iwl_mvm_tx_skb_sta()
1229 ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); in iwl_mvm_tx_skb_sta()
1239 ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); in iwl_mvm_tx_skb_sta()
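
iwl_mvm_tx_skb_sta() is why iwl_mvm_tx_mpdu() appears three times (lines 1219, 1225, 1239): non-GSO frames and GSO frames whose payload fits a single segment go straight through, while larger GSO frames are segmented by iwl_mvm_tx_tso() and each resulting A-MSDU is sent individually. A sketch of that control flow, with the payload_len arithmetic reconstructed from memory:

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	/* a GSO skb whose payload fits one segment needs no TSO */
	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;
	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	/* hand each generated A-MSDU to the normal MPDU path */
	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;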
1249 static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, in iwl_mvm_check_ratid_empty() argument
1261 iwl_mvm_tid_queued(mvm, tid_data) == 0) { in iwl_mvm_check_ratid_empty()
1275 if (mvm->trans->trans_cfg->gen2) in iwl_mvm_check_ratid_empty()
1283 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_check_ratid_empty()
1291 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_check_ratid_empty()
1439 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, in iwl_mvm_tx_status_check_trigger() argument
1453 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_tx_status_check_trigger()
1458 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, in iwl_mvm_tx_status_check_trigger()
1473 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_tx_status_check_trigger()
1493 static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, in iwl_mvm_get_scd_ssn() argument
1496 return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + in iwl_mvm_get_scd_ssn()
1500 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_single() argument
1511 iwl_mvm_get_agg_status(mvm, tx_resp); in iwl_mvm_rx_tx_cmd_single()
1513 u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); in iwl_mvm_rx_tx_cmd_single()
1522 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_rx_tx_cmd_single()
1528 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); in iwl_mvm_rx_tx_cmd_single()
1538 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_rx_tx_cmd_single()
1556 IWL_ERR_LIMIT(mvm, in iwl_mvm_rx_tx_cmd_single()
1567 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); in iwl_mvm_rx_tx_cmd_single()
1577 iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); in iwl_mvm_rx_tx_cmd_single()
1581 iwl_mvm_hwrate_to_tx_status(mvm->fw, in iwl_mvm_rx_tx_cmd_single()
1627 ieee80211_tx_status(mvm->hw, skb); in iwl_mvm_rx_tx_cmd_single()
1646 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
1650 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
1658 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_single()
1669 iwl_mvm_tx_airtime(mvm, mvmsta, in iwl_mvm_rx_tx_cmd_single()
1674 iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); in iwl_mvm_rx_tx_cmd_single()
1685 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
1689 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_single()
1693 iwl_mvm_check_ratid_empty(mvm, sta, tid); in iwl_mvm_rx_tx_cmd_single()
1698 !iwl_mvm_tid_queued(mvm, tid_data)) { in iwl_mvm_rx_tx_cmd_single()
1718 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, in iwl_mvm_rx_tx_cmd_single()
1758 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1763 iwl_mvm_get_agg_status(mvm, tx_resp); in iwl_mvm_rx_tx_cmd_agg_dbg()
1772 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_tx_cmd_agg_dbg()
1782 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_rx_tx_cmd_agg_dbg()
1787 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1792 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, in iwl_mvm_rx_tx_cmd_agg() argument
1807 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); in iwl_mvm_rx_tx_cmd_agg()
1811 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); in iwl_mvm_rx_tx_cmd_agg()
1813 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_agg()
1826 iwl_mvm_tx_airtime(mvm, mvmsta, in iwl_mvm_rx_tx_cmd_agg()
1833 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) in iwl_mvm_rx_tx_cmd() argument
1839 iwl_mvm_rx_tx_cmd_single(mvm, pkt); in iwl_mvm_rx_tx_cmd()
1841 iwl_mvm_rx_tx_cmd_agg(mvm, pkt); in iwl_mvm_rx_tx_cmd()
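
iwl_mvm_rx_tx_cmd() (lines 1839-1841) only dispatches on whether the firmware response covers a single frame or an aggregate. Reassembled, with the frame_count test recalled from memory:

void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	/* single-frame response vs. an A-MPDU response */
	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}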
1844 static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, in iwl_mvm_tx_reclaim() argument
1856 if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || in iwl_mvm_tx_reclaim()
1863 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_tx_reclaim()
1878 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); in iwl_mvm_tx_reclaim()
1883 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_tx_reclaim()
1909 IWL_ERR(mvm, in iwl_mvm_tx_reclaim()
1920 iwl_mvm_check_ratid_empty(mvm, sta, tid); in iwl_mvm_tx_reclaim()
1947 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); in iwl_mvm_tx_reclaim()
1968 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); in iwl_mvm_tx_reclaim()
1970 if (!iwl_mvm_has_tlc_offload(mvm)) { in iwl_mvm_tx_reclaim()
1971 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_tx_reclaim()
1973 iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); in iwl_mvm_tx_reclaim()
1982 ieee80211_tx_status(mvm->hw, skb); in iwl_mvm_tx_reclaim()
1986 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) in iwl_mvm_rx_ba_notif() argument
1998 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_rx_ba_notif()
2022 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); in iwl_mvm_rx_ba_notif()
2044 iwl_mvm_tx_reclaim(mvm, sta_id, tid, in iwl_mvm_rx_ba_notif()
2052 iwl_mvm_tx_airtime(mvm, mvmsta, in iwl_mvm_rx_ba_notif()
2056 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_ba_notif()
2074 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); in iwl_mvm_rx_ba_notif()
2090 iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, in iwl_mvm_rx_ba_notif()
2093 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_ba_notif()
2097 IWL_DEBUG_TX_REPLY(mvm, in iwl_mvm_rx_ba_notif()
2103 IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", in iwl_mvm_rx_ba_notif()
2115 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk) in iwl_mvm_flush_tx_path() argument
2123 WARN_ON(iwl_mvm_has_new_tx_api(mvm)); in iwl_mvm_flush_tx_path()
2124 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0, in iwl_mvm_flush_tx_path()
2127 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); in iwl_mvm_flush_tx_path()
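
iwl_mvm_flush_tx_path() is the pre-AX210 flush: a TXPATH_FLUSH host command keyed by a TFD queue bitmask, hence the WARN_ON for new-TX-API devices on line 2123, where flushing is per station/TID instead. A sketch; the v1 command struct and its fields are recalled from memory:

int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
{
	int ret;
	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}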
2131 int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids) in iwl_mvm_flush_sta_tids() argument
2146 WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); in iwl_mvm_flush_sta_tids()
2148 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) in iwl_mvm_flush_sta_tids()
2151 IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n", in iwl_mvm_flush_sta_tids()
2154 ret = iwl_mvm_send_cmd(mvm, &cmd); in iwl_mvm_flush_sta_tids()
2157 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); in iwl_mvm_flush_sta_tids()
2197 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_flush_sta_tids()
2201 iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after, in iwl_mvm_flush_sta_tids()
2210 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) in iwl_mvm_flush_sta() argument
2218 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_flush_sta()
2219 return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff); in iwl_mvm_flush_sta()
2222 return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk); in iwl_mvm_flush_sta()
2224 return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk); in iwl_mvm_flush_sta()
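
iwl_mvm_flush_sta() takes void *sta because internal stations (iwl_mvm_int_sta) and full mac80211 stations (iwl_mvm_sta) are flushed through the same entry point; the design relies on sta_id sitting at the same offset in both structs. Reassembled from lines 2218-2224, with the BUILD_BUG_ON guard recalled from memory:

int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
{
	struct iwl_mvm_int_sta *int_sta = sta;
	struct iwl_mvm_sta *mvm_sta = sta;

	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
		     offsetof(struct iwl_mvm_sta, sta_id));

	/* new TX API flushes per station/TID, not per TFD queue mask */
	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff);

	if (internal)
		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk);

	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk);
}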