/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2008-2010	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
#include "wme.h"

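/*
 * ieee80211_tx_status_irqsafe - IRQ-safe TX status reporting
 *
 * Tags @skb as a TX status message and queues it on the reliable queue
 * (if the submitter requested a TX status report) or the unreliable
 * queue, drops frames from the unreliable queue once the combined
 * length exceeds IEEE80211_IRQSAFE_QUEUE_LIMIT, and schedules the
 * tasklet that later feeds the frame to ieee80211_tx_status().
 *
 * Illustrative call site (a sketch only -- the driver, handler and
 * helper names below are made up):
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydrv_priv *priv = dev_id;
 *		struct sk_buff *done_skb;
 *
 *		while ((done_skb = mydrv_next_completed_tx(priv)))
 *			ieee80211_tx_status_irqsafe(priv->hw, done_skb);
 *
 *		return IRQ_HANDLED;
 *	}
 */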
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tmp;

	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		dev_kfree_skb_irq(skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);

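/*
 * ieee80211_handle_filtered_frame - handle a frame filtered by hw/fw
 *
 * The frame was not transmitted, typically because the destination STA
 * went to sleep and the hardware/firmware filtered it.  Reset the
 * control information, mark the frame for fresh TX processing without
 * re-encryption and then either buffer it on the per-AC tx_filtered
 * queue, software-retry it once via the pending queue, or drop it.
 */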
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int ac;

	/*
	 * This skb 'survived' a round-trip through the driver, and
	 * hopefully the driver didn't mangle it too badly. However,
	 * we can definitely not rely on the control information
	 * being correct. Clear it so we don't get junk there, and
	 * indicate that it needs new processing, but must not be
	 * modified/encrypted again.
	 */
	memset(&info->control, 0, sizeof(info->control));

	info->control.jiffies = jiffies;
	info->control.vif = &sta->sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
		       IEEE80211_TX_INTFL_RETRANSMISSION;
	info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;

	sta->tx_filtered_count++;

	/*
	 * Clear the more-data bit on filtered frames. It might be set,
	 * but later frames might time out, so it might have to be
	 * cleared again ... It's all rather unlikely (this frame
	 * should time out first, right?) but let's not confuse
	 * peers unnecessarily.
	 */
	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		int tid = *p & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Clear EOSP if set, this could happen e.g.
		 * if an absence period (us being a P2P GO)
		 * shortens the SP.
		 */
		if (*p & IEEE80211_QOS_CTL_EOSP)
			*p &= ~IEEE80211_QOS_CTL_EOSP;
		ac = ieee802_1d_to_ac[tid & 7];
	} else {
		ac = IEEE80211_AC_BE;
	}

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it next wakes up.
	 */
	set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * order they were received, with TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list; if this is done by the
	 * driver in response to set_tim() (which only reduces the race
	 * this whole filtering tries to solve, rather than completely
	 * solving it), this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *	functions and
	 *  (b) always process RX events before TX status events if ordering
	 *      can be unknown, for example with different interrupt status
	 *	bits.
	 *  (c) if PS mode transitions are manual (i.e. the flag
	 *      %IEEE80211_HW_AP_LINK_PS is set), always process PS state
	 *      changes before calling TX status events if ordering can be
	 *	unknown.
	 */
	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
		skb_queue_tail(&sta->tx_filtered[ac], skb);
		sta_info_recalc_tim(sta);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
		/* Software retry the packet once */
		info->flags |= IEEE80211_TX_INTFL_RETRIED;
		ieee80211_add_pending_skb(local, skb);
		return;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	if (net_ratelimit())
		wiphy_debug(local->hw.wiphy,
			    "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
			    skb_queue_len(&sta->tx_filtered[ac]),
			    !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
#endif
	dev_kfree_skb(skb);
}

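/*
 * If sending a BAR for this TID failed earlier (see
 * ieee80211_set_bar_pending()), retry it now that a frame to the same
 * peer and TID has been acknowledged.
 */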
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx || !tid_tx->bar_pending)
		return;

	tid_tx->bar_pending = false;
	ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
}

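/*
 * Per-STA bookkeeping for an acknowledged frame: refresh last_rx when
 * the hardware reports ACK status, resolve any pending BAR for the
 * frame's TID and, when an SM PS action frame is acked on a managed
 * interface, record the AP's new SMPS state and schedule the recalc
 * work.
 */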
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *) skb->data;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
		sta->last_rx = jiffies;

	if (ieee80211_is_data_qos(mgmt->frame_control)) {
		struct ieee80211_hdr *hdr = (void *) skb->data;
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		u16 tid = qc[0] & 0xf;

		ieee80211_check_pending_bar(sta, hdr->addr1, tid);
	}

	if (ieee80211_is_action(mgmt->frame_control) &&
	    sdata->vif.type == NL80211_IFTYPE_STATION &&
	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
		/*
		 * This update looks racy, but isn't -- if we come
		 * here we've definitely got a station that we're
		 * talking to, and on a managed interface that can
		 * only be the AP. And the only other place updating
		 * this variable is before we're associated.
		 */
		switch (mgmt->u.action.u.ht_smps.smps_control) {
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DISABLED:
		default: /* shouldn't happen since we don't send that */
			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
			break;
		}

		ieee80211_queue_work(&local->hw, &local->recalc_smps);
	}
}

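/*
 * Remember the SSN of a BAR that was not acked so that
 * ieee80211_check_pending_bar() can resend it later.
 */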
static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx)
		return;

	tid_tx->failed_bar_ssn = ssn;
	tid_tx->bar_pending = true;
}

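/*
 * Size of the radiotap header prepended to frames delivered to monitor
 * interfaces on the TX status path: the fixed radiotap header, a legacy
 * rate byte plus one padding byte (non-MCS rates only), two bytes of TX
 * flags, one data-retries byte and, for MCS rates, the three-byte MCS
 * field.
 */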
static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
{
	int len = sizeof(struct ieee80211_radiotap_header);

	/* IEEE80211_RADIOTAP_RATE rate */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
		len += 2;

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	len += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	len += 1;

	/* IEEE80211_RADIOTAP_MCS */
	if (info->status.rates[0].idx >= 0 &&
	    info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
		len += 3;

	return len;
}

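/*
 * Build the radiotap header sized by ieee80211_tx_radiotap_len() in the
 * headroom of @skb, reporting only the first rate entry and the total
 * retry count.
 */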
static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
					     *sband, struct sk_buff *skb,
					     int retry_count, int rtap_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 txflags;

	rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);

	memset(rthdr, 0, rtap_len);
	rthdr->it_len = cpu_to_le16(rtap_len);
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
	pos = (unsigned char *)(rthdr + 1);

	/*
	 * XXX: Once radiotap gets the bitmap reset thing the vendor
	 *	extensions proposal contains, we can actually report
	 *	the whole set of tries we did.
	 */

	/* IEEE80211_RADIOTAP_RATE */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		*pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
		/* 1 byte rate plus 1 byte padding keeps TX flags aligned */
		pos += 2;
	}

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	txflags = 0;
	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;

	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
	else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		txflags |= IEEE80211_RADIOTAP_F_TX_RTS;

	put_unaligned_le16(txflags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	/* for now report the total retry_count */
	*pos = retry_count;
	pos++;

	/* IEEE80211_RADIOTAP_MCS */
	if (info->status.rates[0].idx >= 0 &&
	    info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
			 IEEE80211_RADIOTAP_MCS_HAVE_GI |
			 IEEE80211_RADIOTAP_MCS_HAVE_BW;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
			pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		pos[2] = info->status.rates[0].idx;
		pos += 3;
	}
}

/*
 * Use a static threshold for now, best value to be determined
 * by testing ...
 * Should it depend on:
 *  - the number of retransmissions
 *  - current throughput (higher value for higher tpt)?
 */
#define STA_LOST_PKT_THRESHOLD	50

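/*
 * ieee80211_tx_status - main TX status processing
 *
 * Called for every completed frame, either directly by the driver or
 * from the tasklet for frames queued via ieee80211_tx_status_irqsafe().
 * Updates per-STA state (rate control, BAR/aggregation recovery, PS
 * filtering, packet-loss CQM events), the dot11 counters and dynamic PS
 * state, reports nl80211 frame TX and socket ACK status, and finally
 * delivers the frame to monitor interfaces with a TX radiotap header.
 */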
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *skb2;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	__le16 fc;
	struct ieee80211_supported_band *sband;
	struct ieee80211_sub_if_data *sdata;
	struct net_device *prev_dev = NULL;
	struct sta_info *sta, *tmp;
	int retry_count = -1, i;
	int rates_idx = -1;
	bool send_to_cooked;
	bool acked;
	struct ieee80211_bar *bar;
	int rtap_len;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (info->status.rates[i].idx < 0) {
			break;
		} else if (i >= hw->max_report_rates) {
			/* the HW cannot have attempted that rate */
			info->status.rates[i].idx = -1;
			info->status.rates[i].count = 0;
			break;
		}

		retry_count += info->status.rates[i].count;
	}
	rates_idx = i - 1;

	if (retry_count < 0)
		retry_count = 0;

	rcu_read_lock();

	sband = local->hw.wiphy->bands[info->band];
	fc = hdr->frame_control;

	for_each_sta_info(local, hdr->addr1, sta, tmp) {
		/* skip wrong virtual interface */
		if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
			continue;

		if (info->flags & IEEE80211_TX_STATUS_EOSP)
			clear_sta_flag(sta, WLAN_STA_SP);

		acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
			/*
			 * The STA is in power save mode, so assume
			 * that this TX packet failed because of that.
			 */
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		}

		if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
		    (rates_idx != -1))
			sta->last_tx_rate = info->status.rates[rates_idx];

		if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
		    (ieee80211_is_data_qos(fc))) {
			u16 tid, ssn;
			u8 *qc;

			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;
			ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
						& IEEE80211_SCTL_SEQ);
			ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
					   tid, ssn);
		}

		if (!acked && ieee80211_is_back_req(fc)) {
			u16 tid, control;

			/*
			 * BAR failed, store the last SSN and retry sending
			 * the BAR when the next unicast transmission on the
			 * same TID succeeds.
			 */
			bar = (struct ieee80211_bar *) skb->data;
			control = le16_to_cpu(bar->control);
			if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
				u16 ssn = le16_to_cpu(bar->start_seq_num);

				tid = (control &
				       IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
				      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;

				if (local->hw.flags &
				    IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
					ieee80211_stop_tx_ba_session(&sta->sta, tid);
				else
					ieee80211_set_bar_pending(sta, tid, ssn);
			}
		}

		if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		} else {
			if (!acked)
				sta->tx_retry_failed++;
			sta->tx_retry_count += retry_count;
		}

		rate_control_tx_status(local, sband, sta, skb);
		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
			ieee80211s_update_metric(local, sta, skb);

		if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
			ieee80211_frame_acked(sta, skb);

		if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
		    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
			ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
						acked);

		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
			if (info->flags & IEEE80211_TX_STAT_ACK) {
				if (sta->lost_packets)
					sta->lost_packets = 0;
			} else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) {
				cfg80211_cqm_pktloss_notify(sta->sdata->dev,
							    sta->sta.addr,
							    sta->lost_packets,
							    GFP_ATOMIC);
				sta->lost_packets = 0;
			}
		}
	}

	rcu_read_unlock();

	ieee80211_led_tx(local, 0);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */
	if (info->flags & IEEE80211_TX_STAT_ACK) {
		if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
			local->dot11TransmittedFrameCount++;
			if (is_multicast_ether_addr(hdr->addr1))
				local->dot11MulticastTransmittedFrameCount++;
			if (retry_count > 0)
				local->dot11RetryCount++;
			if (retry_count > 1)
				local->dot11MultipleRetryCount++;
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    ieee80211_is_data(fc) ||
		    ieee80211_is_mgmt(fc))
			local->dot11TransmittedFragmentCount++;
	} else {
		if (ieee80211_is_first_frag(hdr->seq_ctrl))
			local->dot11FailedCount++;
	}

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
	    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    local->ps_sdata && !(local->scanning)) {
		if (info->flags & IEEE80211_TX_STAT_ACK) {
			local->ps_sdata->u.mgd.flags |=
					IEEE80211_STA_NULLFUNC_ACKED;
		} else {
			mod_timer(&local->dynamic_ps_timer, jiffies +
					msecs_to_jiffies(10));
		}
	}

	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
		u64 cookie = (unsigned long)skb;

		if (ieee80211_is_nullfunc(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
			acked = info->flags & IEEE80211_TX_STAT_ACK;

			cfg80211_probe_status(skb->dev, hdr->addr1,
					      cookie, acked, GFP_ATOMIC);
		} else {
			struct ieee80211_work *wk;

			rcu_read_lock();
			list_for_each_entry_rcu(wk, &local->work_list, list) {
				if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
					continue;
				if (wk->offchan_tx.frame != skb)
					continue;
				wk->offchan_tx.status = true;
				break;
			}
			rcu_read_unlock();
			if (local->hw_roc_skb_for_status == skb) {
				cookie = local->hw_roc_cookie ^ 2;
				local->hw_roc_skb_for_status = NULL;
			}

			cfg80211_mgmt_tx_status(
				skb->dev, cookie, skb->data, skb->len,
				!!(info->flags & IEEE80211_TX_STAT_ACK),
				GFP_ATOMIC);
		}
	}

	if (unlikely(info->ack_frame_id)) {
		struct sk_buff *ack_skb;
		unsigned long flags;

		spin_lock_irqsave(&local->ack_status_lock, flags);
		ack_skb = idr_find(&local->ack_status_frames,
				   info->ack_frame_id);
		if (ack_skb)
			idr_remove(&local->ack_status_frames,
				   info->ack_frame_id);
		spin_unlock_irqrestore(&local->ack_status_lock, flags);

		/* consumes ack_skb */
		if (ack_skb)
			skb_complete_wifi_ack(ack_skb,
				info->flags & IEEE80211_TX_STAT_ACK);
	}

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/* Need to make a copy before skb->cb gets cleared */
	send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
			 !(ieee80211_is_data(fc));

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
		dev_kfree_skb(skb);
		return;
	}

	/* send frame to monitor interfaces now */
	rtap_len = ieee80211_tx_radiotap_len(info);
	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
		printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}
	ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);

	/* XXX: is this sufficient for BPF? */
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
			if (!ieee80211_sdata_running(sdata))
				continue;

			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
			    !send_to_cooked)
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);

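/*
 * Exported hook for drivers that do their own ACK-failure accounting:
 * forwards the loss count to the cfg80211 packet-loss CQM notification,
 * mirroring the STA_LOST_PKT_THRESHOLD logic above.
 */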
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
				    num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);

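/*
 * Free a TX skb for which no TX status will be reported, releasing any
 * ACK status frame still registered for it in the ack_status_frames
 * idr.
 */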
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (unlikely(info->ack_frame_id)) {
		struct sk_buff *ack_skb;
		unsigned long flags;

		spin_lock_irqsave(&local->ack_status_lock, flags);
		ack_skb = idr_find(&local->ack_status_frames,
				   info->ack_frame_id);
		if (ack_skb)
			idr_remove(&local->ack_status_frames,
				   info->ack_frame_id);
		spin_unlock_irqrestore(&local->ack_status_lock, flags);

		/* consumes ack_skb */
		if (ack_skb)
			dev_kfree_skb_any(ack_skb);
	}

	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);

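/*
 * Drop all frames on a driver-internal TX queue, freeing each one via
 * ieee80211_free_txskb() so pending ACK status frames are cleaned up
 * too.
 */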
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
			      struct sk_buff_head *skbs)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skbs)))
		ieee80211_free_txskb(hw, skb);
}