/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
 * Copyright 2008-2010  Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"

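/*
 * Drivers that generate TX status reports from interrupt context use this
 * entry point: the skb is queued (on the "unreliable" queue unless the
 * frame requested status reporting) and local->tasklet later hands it to
 * ieee80211_tx_status().  A minimal sketch of a hypothetical driver's
 * completion handler (the mydrv_* names are illustrative only):
 *
 *      static void mydrv_tx_done(struct mydrv *priv, struct sk_buff *skb,
 *                                bool acked)
 *      {
 *              struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *              ieee80211_tx_info_clear_status(info);
 *              if (acked)
 *                      info->flags |= IEEE80211_TX_STAT_ACK;
 *              ieee80211_tx_status_irqsafe(priv->hw, skb);
 *      }
 */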
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
                                 struct sk_buff *skb)
{
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int tmp;

        skb->pkt_type = IEEE80211_TX_STATUS_MSG;
        skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
                       &local->skb_queue : &local->skb_queue_unreliable, skb);
        tmp = skb_queue_len(&local->skb_queue) +
              skb_queue_len(&local->skb_queue_unreliable);
        while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
               (skb = skb_dequeue(&local->skb_queue_unreliable))) {
                dev_kfree_skb_irq(skb);
                tmp--;
                I802_DEBUG_INC(local->tx_status_drop);
        }
        tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);

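/*
 * Handle a frame that the hardware filtered or failed to ACK, typically
 * because the destination STA went to sleep: buffer it for retransmission
 * when the STA wakes up, software-retry it once if the STA is not actually
 * in powersave, and otherwise drop it.
 */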
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
                                            struct sta_info *sta,
                                            struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        /*
         * This skb 'survived' a round-trip through the driver, and
         * hopefully the driver didn't mangle it too badly. However,
         * we can definitely not rely on the control information
         * being correct. Clear it so we don't get junk there, and
         * indicate that it needs new processing, but must not be
         * modified/encrypted again.
         */
        memset(&info->control, 0, sizeof(info->control));

        info->control.jiffies = jiffies;
        info->control.vif = &sta->sdata->vif;
        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
                       IEEE80211_TX_INTFL_RETRANSMISSION;
        info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;

        sta->tx_filtered_count++;

        /*
         * Clear the TX filter mask for this STA when sending the next
         * packet. If the STA went to power save mode, this will happen
         * the next time it wakes up.
         */
        set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);

        /*
         * This code races in the following way:
         *
         *  (1) STA sends frame indicating it will go to sleep and does so
         *  (2) hardware/firmware adds STA to filter list, passes frame up
         *  (3) hardware/firmware processes TX fifo and suppresses a frame
         *  (4) we get TX status before having processed the frame and
         *      knowing that the STA has gone to sleep.
         *
         * This is actually quite unlikely even when both those events are
         * processed from interrupts coming in quickly after one another or
         * even at the same time because we queue both TX status events and
         * RX frames to be processed by a tasklet and process them in the
         * same order that they were received or TX status last. Hence, there
         * is no race as long as the frame RX is processed before the next TX
         * status, which drivers can ensure, see below.
         *
         * Note that this can only happen if the hardware or firmware can
         * actually add STAs to the filter list itself; if that is done by
         * the driver in response to set_tim() (which will only reduce the
         * race this whole filtering tries to solve, not completely solve
         * it), this situation cannot happen.
         *
         * To completely solve this race drivers need to make sure that they
         *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
         *      functions and
         *  (b) always process RX events before TX status events if ordering
         *      can be unknown, for example with different interrupt status
         *      bits.
         *  (c) if PS mode transitions are manual (i.e. the flag
         *      %IEEE80211_HW_AP_LINK_PS is set), always process PS state
         *      changes before calling TX status events if ordering can be
         *      unknown.
         */
        if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
            skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
                skb_queue_tail(&sta->tx_filtered, skb);
                sta_info_recalc_tim(sta);

                if (!timer_pending(&local->sta_cleanup))
                        mod_timer(&local->sta_cleanup,
                                  round_jiffies(jiffies +
                                                STA_INFO_CLEANUP_INTERVAL));
                return;
        }

        if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
            !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
                /* Software retry the packet once */
                info->flags |= IEEE80211_TX_INTFL_RETRIED;
                ieee80211_add_pending_skb(local, skb);
                return;
        }

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
        if (net_ratelimit())
                wiphy_debug(local->hw.wiphy,
                            "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
                            skb_queue_len(&sta->tx_filtered),
                            !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
#endif
        dev_kfree_skb(skb);
}

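/*
 * If a BlockAckReq previously sent to this STA/TID failed (see
 * ieee80211_set_bar_pending() below), re-send it now that a frame on the
 * same TID has been acknowledged.
 */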
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
{
        struct tid_ampdu_tx *tid_tx;

        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx || !tid_tx->bar_pending)
                return;

        tid_tx->bar_pending = false;
        ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
}

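/*
 * Handle a frame that was reported as acknowledged (and was not injected):
 * re-send any pending BAR for the frame's TID, and if the frame was an HT
 * SMPS action frame sent on a managed interface, record the SM power save
 * mode the AP has now acknowledged and schedule the SMPS recalculation work.
 */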
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt = (void *) skb->data;
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;

        if (ieee80211_is_data_qos(mgmt->frame_control)) {
                struct ieee80211_hdr *hdr = (void *) skb->data;
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                u16 tid = qc[0] & 0xf;

                ieee80211_check_pending_bar(sta, hdr->addr1, tid);
        }

        if (ieee80211_is_action(mgmt->frame_control) &&
            sdata->vif.type == NL80211_IFTYPE_STATION &&
            mgmt->u.action.category == WLAN_CATEGORY_HT &&
            mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
                /*
                 * This update looks racy, but isn't -- if we come
                 * here we've definitely got a station that we're
                 * talking to, and on a managed interface that can
                 * only be the AP. And the only other place updating
                 * this variable is before we're associated.
                 */
                switch (mgmt->u.action.u.ht_smps.smps_control) {
                case WLAN_HT_SMPS_CONTROL_DYNAMIC:
                        sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
                        break;
                case WLAN_HT_SMPS_CONTROL_STATIC:
                        sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
                        break;
                case WLAN_HT_SMPS_CONTROL_DISABLED:
                default: /* shouldn't happen since we don't send that */
                        sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
                        break;
                }

                ieee80211_queue_work(&local->hw, &local->recalc_smps);
        }
}

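/*
 * A BlockAckReq we sent was not acknowledged: remember its starting sequence
 * number so that ieee80211_check_pending_bar() can re-send it after the next
 * successful transmission on the same TID.
 */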
static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
{
        struct tid_ampdu_tx *tid_tx;

        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx)
                return;

        tid_tx->failed_bar_ssn = ssn;
        tid_tx->bar_pending = true;
}

/*
 * Use a static threshold for now; the best value is to be determined
 * by testing ...
 * Should it depend on:
 *  - # of retransmissions
 *  - current throughput (higher value for higher tpt)?
 */
#define STA_LOST_PKT_THRESHOLD  50

void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct sk_buff *skb2;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u16 frag, type;
        __le16 fc;
        struct ieee80211_supported_band *sband;
        struct ieee80211_tx_status_rtap_hdr *rthdr;
        struct ieee80211_sub_if_data *sdata;
        struct net_device *prev_dev = NULL;
        struct sta_info *sta, *tmp;
        int retry_count = -1, i;
        int rates_idx = -1;
        bool send_to_cooked;
        bool acked;
        struct ieee80211_bar *bar;
        u16 tid;

        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                if (info->status.rates[i].idx < 0) {
                        break;
                } else if (i >= hw->max_report_rates) {
                        /* the HW cannot have attempted that rate */
                        info->status.rates[i].idx = -1;
                        info->status.rates[i].count = 0;
                        break;
                }

                retry_count += info->status.rates[i].count;
        }
        rates_idx = i - 1;

        if (retry_count < 0)
                retry_count = 0;

        rcu_read_lock();

        sband = local->hw.wiphy->bands[info->band];
        fc = hdr->frame_control;

        for_each_sta_info(local, hdr->addr1, sta, tmp) {
                /* skip wrong virtual interface */
                if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
                        continue;

                acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
                if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) {
                        /*
                         * The STA is in power save mode, so assume
                         * that this TX packet failed because of that.
                         */
                        ieee80211_handle_filtered_frame(local, sta, skb);
                        rcu_read_unlock();
                        return;
                }

                if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
                    (rates_idx != -1))
                        sta->last_tx_rate = info->status.rates[rates_idx];

                if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
                    (ieee80211_is_data_qos(fc))) {
                        u16 tid, ssn;
                        u8 *qc;

                        qc = ieee80211_get_qos_ctl(hdr);
                        tid = qc[0] & 0xf;
                        ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
                               & IEEE80211_SCTL_SEQ);
                        ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
                                           tid, ssn);
                }

                if (!acked && ieee80211_is_back_req(fc)) {
                        u16 control;

                        /*
                         * BAR failed, store the last SSN and retry sending
                         * the BAR when the next unicast transmission on the
                         * same TID succeeds.
                         */
                        bar = (struct ieee80211_bar *) skb->data;
                        control = le16_to_cpu(bar->control);
                        if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
                                u16 ssn = le16_to_cpu(bar->start_seq_num);

                                tid = (control &
                                       IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
                                      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;

                                ieee80211_set_bar_pending(sta, tid, ssn);
                        }
                }

                if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
                        ieee80211_handle_filtered_frame(local, sta, skb);
                        rcu_read_unlock();
                        return;
                } else {
                        if (!acked)
                                sta->tx_retry_failed++;
                        sta->tx_retry_count += retry_count;
                }

                rate_control_tx_status(local, sband, sta, skb);
                if (ieee80211_vif_is_mesh(&sta->sdata->vif))
                        ieee80211s_update_metric(local, sta, skb);

                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);

                if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
                    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
                        ieee80211_sta_tx_notify(sta->sdata,
                                                (void *) skb->data, acked);

                if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
                        if (info->flags & IEEE80211_TX_STAT_ACK) {
                                if (sta->lost_packets)
                                        sta->lost_packets = 0;
                        } else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) {
                                cfg80211_cqm_pktloss_notify(sta->sdata->dev,
                                                            sta->sta.addr,
                                                            sta->lost_packets,
                                                            GFP_ATOMIC);
                                sta->lost_packets = 0;
                        }
                }
        }

        rcu_read_unlock();

        ieee80211_led_tx(local, 0);

        /*
         * SNMP counters
         * Fragments are passed to low-level drivers as separate skbs, so these
         * are actually fragments, not frames. Update frame counters only for
         * the first fragment of the frame.
         */

        frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
        type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;

        if (info->flags & IEEE80211_TX_STAT_ACK) {
                if (frag == 0) {
                        local->dot11TransmittedFrameCount++;
                        if (is_multicast_ether_addr(hdr->addr1))
                                local->dot11MulticastTransmittedFrameCount++;
                        if (retry_count > 0)
                                local->dot11RetryCount++;
                        if (retry_count > 1)
                                local->dot11MultipleRetryCount++;
                }

                /*
                 * This counter shall be incremented for an acknowledged MPDU
                 * with an individual address in the address 1 field or an MPDU
                 * with a multicast address in the address 1 field of type Data
                 * or Management.
                 */
                if (!is_multicast_ether_addr(hdr->addr1) ||
                    type == IEEE80211_FTYPE_DATA ||
                    type == IEEE80211_FTYPE_MGMT)
                        local->dot11TransmittedFragmentCount++;
        } else {
                if (frag == 0)
                        local->dot11FailedCount++;
        }

        if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
            (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
            !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            local->ps_sdata && !(local->scanning)) {
                if (info->flags & IEEE80211_TX_STAT_ACK) {
                        local->ps_sdata->u.mgd.flags |=
                                        IEEE80211_STA_NULLFUNC_ACKED;
                } else
                        mod_timer(&local->dynamic_ps_timer, jiffies +
                                        msecs_to_jiffies(10));
        }

        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                struct ieee80211_work *wk;
                u64 cookie = (unsigned long)skb;

                rcu_read_lock();
                list_for_each_entry_rcu(wk, &local->work_list, list) {
                        if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
                                continue;
                        if (wk->offchan_tx.frame != skb)
                                continue;
                        wk->offchan_tx.frame = NULL;
                        break;
                }
                rcu_read_unlock();
                if (local->hw_roc_skb_for_status == skb) {
                        cookie = local->hw_roc_cookie ^ 2;
                        local->hw_roc_skb_for_status = NULL;
                }

                cfg80211_mgmt_tx_status(
                        skb->dev, cookie, skb->data, skb->len,
                        !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
        }

        /* this was a transmitted frame, but now we want to reuse it */
        skb_orphan(skb);

        /* Need to make a copy before skb->cb gets cleared */
        send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
                         (type != IEEE80211_FTYPE_DATA);

        /*
         * This is a bit racy but we can avoid a lot of work
         * with this test...
         */
        if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
                dev_kfree_skb(skb);
                return;
        }

        /* send frame to monitor interfaces now */

        if (skb_headroom(skb) < sizeof(*rthdr)) {
                printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
                dev_kfree_skb(skb);
                return;
        }

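        /*
         * Prepend a minimal radiotap TX status header (TX flags, data
         * retries and, for non-MCS rates, the bitrate) so that monitor
         * interfaces see the outcome of the transmission.
         */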
        rthdr = (struct ieee80211_tx_status_rtap_hdr *)
                                skb_push(skb, sizeof(*rthdr));

        memset(rthdr, 0, sizeof(*rthdr));
        rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
        rthdr->hdr.it_present =
                cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
                            (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
                            (1 << IEEE80211_RADIOTAP_RATE));

        if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
            !is_multicast_ether_addr(hdr->addr1))
                rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);

        /*
         * XXX: Once radiotap gets the bitmap reset thing the vendor
         *      extensions proposal contains, we can actually report
         *      the whole set of tries we did.
         */
        if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
        else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
        if (info->status.rates[0].idx >= 0 &&
            !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
                rthdr->rate = sband->bitrates[
                                info->status.rates[0].idx].bitrate / 5;

        /* for now report the total retry_count */
        rthdr->data_retries = retry_count;

        /* XXX: is this sufficient for BPF? */
        skb_set_mac_header(skb, 0);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->pkt_type = PACKET_OTHERHOST;
        skb->protocol = htons(ETH_P_802_2);
        memset(skb->cb, 0, sizeof(skb->cb));

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
                        if (!ieee80211_sdata_running(sdata))
                                continue;

                        if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
                            !send_to_cooked)
                                continue;

                        if (prev_dev) {
                                skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2) {
                                        skb2->dev = prev_dev;
                                        netif_rx(skb2);
                                }
                        }

                        prev_dev = sdata->dev;
                }
        }
        if (prev_dev) {
                skb->dev = prev_dev;
                netif_rx(skb);
                skb = NULL;
        }
        rcu_read_unlock();
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);

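/*
 * Exported for drivers that detect excessive ACK loss themselves; it feeds
 * the cfg80211 connection quality monitor (CQM) packet-loss notification
 * directly, independently of the STA_LOST_PKT_THRESHOLD accounting above.
 */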
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

        cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
                                    num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);