/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

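/*
 * IBSS power-save endpoint mapping: each peer MAC is bound to one of
 * the data endpoints 2-5 in ar->node_map, with a per-peer pending-tx
 * count, presumably so per-peer traffic can be tracked independently.
 * Multicast always goes out on ENDPOINT_2, and once all endpoints are
 * in use they are handed out round-robin via ar->next_ep_id.
 */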
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

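/*
 * Queue a frame for a U-APSD capable station. Returns true when the
 * frame was queued on the station's APSD queue (the queue now owns the
 * skb) and false when the caller should transmit it immediately, with
 * the MORE/EOSP/UAPSD header flags returned through @flags.
 */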
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

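/*
 * Legacy power-save queueing: park the frame on the station's psq and,
 * for the first frame queued, ask the target to set the station's bit
 * in the TIM partial virtual bitmap (PVB). Returns true when the frame
 * was queued.
 */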
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

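/*
 * AP mode power-save gate for the tx path. Returns true when the skb
 * has been consumed (queued for a sleeping station, or dropped because
 * no matching station exists) and false when the caller should send it
 * now, possibly with additional header flags in @flags.
 */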
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}
	return ps_queued;
}

/* Tx functions */

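/*
 * Queue a WMI control message for transmission on the given HTC
 * endpoint. When the control endpoint is already flagged full, the
 * message is dropped rather than tying up another cookie.
 */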
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

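/*
 * Data transmit entry point (hooked up as the netdev xmit handler):
 * runs AP power-save processing, converts the frame to 802.3 + WMI
 * data header form (optionally with checksum-offload meta data), picks
 * the HTC endpoint from the AC mapping and hands the frame to HTC.
 */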
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since we
		 * are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

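/*
 * Called by HTC when an endpoint's send queue fills up. Low-priority
 * packets are dropped once the cookie pool falls to the endpoint's
 * drop threshold; otherwise the netif queue of the first matching vif
 * is stopped so the stack backs off.
 */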
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

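/*
 * HTC tx completion: reap finished packets, update per-vif statistics,
 * recycle the tx cookies, and wake the netif queues of connected vifs
 * once no flush is in progress on them.
 */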
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

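/*
 * Keep the HTC rx ring for @endpoint topped up with 4-byte aligned
 * buffers of ATH6KL_BUFFER_SIZE bytes.
 */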
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

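/*
 * Split a received A-MSDU into its 802.3 subframes, convert each
 * subframe to DIX format and queue it on the tid's output queue; the
 * original A-MSDU skb is freed at the end.
 */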
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

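/*
 * Release frames from the tid's reorder hold queue. With @order == 0
 * everything up to @seq_no (or the whole window when @seq_no is 0) is
 * flushed, holes included; with @order == 1 delivery stops at the
 * first hole so out-of-order frames keep waiting.
 */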
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4095 there would be holes when sequence wrap around occurs.
	 * The target should judiciously choose win_sz with this
	 * condition in mind (for 4095, win_sz of 2, 4, 8 or 16 works
	 * fine, with TID_WINDOW_SZ = 2 x win_sz).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

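/*
 * Slot a received frame into the tid's reorder window by 802.11
 * sequence number. Frames beyond the window shift it forward (flushing
 * whatever falls out), in-order frames are delivered immediately, and
 * the release timer is armed whenever frames are left parked in the
 * hold queue. Returns true when the aggregation code has taken
 * ownership of the skb.
 */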
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 *  2a. is the frame_seq_no preceding the current tid_seq_no?
	 *      -> drop the frame. perhaps the sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *      -> taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

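/*
 * Handle a U-APSD trigger from @conn: transmit up to the number of
 * frames the station requested in its QoS info (all buffered frames if
 * it requested zero) from the APSD queue, marking the last one with
 * EOSP, then update the target's buffered-traffic state.
 */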
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

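/*
 * HTC rx completion: strip the HTC and WMI headers, do AP power-save
 * bookkeeping for the sending station, then pass the frame to the rx
 * aggregation reorder code or straight up the network stack.
 */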
Kalle Valobdcd8172011-07-18 00:22:30 +03001284void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1285{
1286 struct ath6kl *ar = target->dev->ar;
1287 struct sk_buff *skb = packet->pkt_cntxt;
1288 struct wmi_rx_meta_v2 *meta;
1289 struct wmi_data_hdr *dhdr;
1290 int min_hdr_len;
1291 u8 meta_type, dot11_hdr = 0;
Kalle Valo8bd5bca2012-03-25 17:15:25 +03001292 u8 pad_before_data_start;
Kalle Valobdcd8172011-07-18 00:22:30 +03001293 int status = packet->status;
1294 enum htc_endpoint_id ept = packet->endpoint;
1295 bool is_amsdu, prev_ps, ps_state = false;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301296 bool trig_state = false;
Kalle Valobdcd8172011-07-18 00:22:30 +03001297 struct ath6kl_sta *conn = NULL;
1298 struct sk_buff *skb1 = NULL;
1299 struct ethhdr *datap = NULL;
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301300 struct ath6kl_vif *vif;
Vasanthakumar Thiagarajan1d2a4452012-01-21 15:22:53 +05301301 struct aggr_info_conn *aggr_conn;
Kalle Valobdcd8172011-07-18 00:22:30 +03001302 u16 seq_no, offset;
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301303 u8 tid, if_idx;
Kalle Valobdcd8172011-07-18 00:22:30 +03001304
1305 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1306 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1307 __func__, ar, ept, skb, packet->buf,
1308 packet->act_len, status);
1309
1310 if (status || !(skb->data + HTC_HDR_LENGTH)) {
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301311 dev_kfree_skb(skb);
1312 return;
1313 }
1314
1315 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1316 skb_pull(skb, HTC_HDR_LENGTH);
1317
Vasanthakumar Thiagarajan81db48d2012-02-28 20:20:22 +05301318 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1319 skb->data, skb->len);
1320
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301321 if (ept == ar->ctrl_ep) {
Vasanthakumar Thiagarajan81db48d2012-02-28 20:20:22 +05301322 if (test_bit(WMI_ENABLED, &ar->flag)) {
1323 ath6kl_check_wow_status(ar);
1324 ath6kl_wmi_control_rx(ar->wmi, skb);
1325 return;
1326 }
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301327 if_idx =
1328 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1329 } else {
1330 if_idx =
1331 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1332 }
1333
1334 vif = ath6kl_get_vif_by_index(ar, if_idx);
1335 if (!vif) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001336 dev_kfree_skb(skb);
1337 return;
1338 }
1339
1340 /*
1341 * Take lock to protect buffer counts and adaptive power throughput
1342 * state.
1343 */
Vasanthakumar Thiagarajan478ac022011-10-25 19:34:19 +05301344 spin_lock_bh(&vif->if_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +03001345
Vasanthakumar Thiagarajanb95907a2011-10-25 19:34:11 +05301346 vif->net_stats.rx_packets++;
1347 vif->net_stats.rx_bytes += packet->act_len;
Kalle Valobdcd8172011-07-18 00:22:30 +03001348
Vasanthakumar Thiagarajan478ac022011-10-25 19:34:19 +05301349 spin_unlock_bh(&vif->if_lock);
Vasanthakumar Thiagarajan83dc5f22011-08-14 17:08:33 +05301350
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301351 skb->dev = vif->ndev;
Kalle Valobdcd8172011-07-18 00:22:30 +03001352
1353 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1354 if (EPPING_ALIGNMENT_PAD > 0)
1355 skb_pull(skb, EPPING_ALIGNMENT_PAD);
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301356 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
Kalle Valobdcd8172011-07-18 00:22:30 +03001357 return;
1358 }
1359
Raja Mania918fb32011-11-07 22:52:46 +02001360 ath6kl_check_wow_status(ar);
1361
Vasanthakumar Thiagarajan67f91782011-08-14 17:08:34 +05301362 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1363 sizeof(struct ath6kl_llc_snap_hdr);
Kalle Valobdcd8172011-07-18 00:22:30 +03001364
1365 dhdr = (struct wmi_data_hdr *) skb->data;
1366
1367 /*
1368 * In the case of AP mode we may receive NULL data frames
1369 * that do not have LLC hdr. They are 16 bytes in size.
1370 * Allow these frames in the AP mode.
1371 */
Vasanthakumar Thiagarajanf5938f22011-10-25 19:34:03 +05301372 if (vif->nw_type != AP_NETWORK &&
Kalle Valobdcd8172011-07-18 00:22:30 +03001373 ((packet->act_len < min_hdr_len) ||
1374 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1375 ath6kl_info("frame len is too short or too long\n");
Vasanthakumar Thiagarajanb95907a2011-10-25 19:34:11 +05301376 vif->net_stats.rx_errors++;
1377 vif->net_stats.rx_length_errors++;
Kalle Valobdcd8172011-07-18 00:22:30 +03001378 dev_kfree_skb(skb);
1379 return;
1380 }
1381
1382 /* Get the Power save state of the STA */
Vasanthakumar Thiagarajanf5938f22011-10-25 19:34:03 +05301383 if (vif->nw_type == AP_NETWORK) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001384 meta_type = wmi_data_hdr_get_meta(dhdr);
1385
1386 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1387 WMI_DATA_HDR_PS_MASK);
1388
1389 offset = sizeof(struct wmi_data_hdr);
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301390 trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
Kalle Valobdcd8172011-07-18 00:22:30 +03001391
1392 switch (meta_type) {
1393 case 0:
1394 break;
1395 case WMI_META_VERSION_1:
1396 offset += sizeof(struct wmi_rx_meta_v1);
1397 break;
1398 case WMI_META_VERSION_2:
1399 offset += sizeof(struct wmi_rx_meta_v2);
1400 break;
1401 default:
1402 break;
1403 }
1404
1405 datap = (struct ethhdr *) (skb->data + offset);
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301406 conn = ath6kl_find_sta(vif, datap->h_source);
Kalle Valobdcd8172011-07-18 00:22:30 +03001407
1408 if (!conn) {
1409 dev_kfree_skb(skb);
1410 return;
1411 }
1412
1413 /*
1414 * If there is a change in PS state of the STA,
1415 * take appropriate steps:
1416 *
1417 * 1. If Sleep-->Awake, flush the psq for the STA
1418 * Clear the PVB for the STA.
1419 * 2. If Awake-->Sleep, Starting queueing frames
1420 * the STA.
1421 */
1422 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1423
1424 if (ps_state)
1425 conn->sta_flags |= STA_PS_SLEEP;
1426 else
1427 conn->sta_flags &= ~STA_PS_SLEEP;
1428
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301429 /* Accept trigger only when the station is in sleep */
1430 if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1431 ath6kl_uapsd_trigger_frame_rx(vif, conn);
1432
Kalle Valobdcd8172011-07-18 00:22:30 +03001433 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1434 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1435 struct sk_buff *skbuff = NULL;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301436 bool is_apsdq_empty;
Naveen Gangadharand0ff7382012-02-08 17:51:36 -08001437 struct ath6kl_mgmt_buff *mgmt;
1438 u8 idx;
Kalle Valobdcd8172011-07-18 00:22:30 +03001439
1440 spin_lock_bh(&conn->psq_lock);
Naveen Gangadharand0ff7382012-02-08 17:51:36 -08001441 while (conn->mgmt_psq_len > 0) {
1442 mgmt = list_first_entry(
1443 &conn->mgmt_psq,
1444 struct ath6kl_mgmt_buff,
1445 list);
1446 list_del(&mgmt->list);
1447 conn->mgmt_psq_len--;
1448 spin_unlock_bh(&conn->psq_lock);
1449 idx = vif->fw_vif_idx;
1450
1451 ath6kl_wmi_send_mgmt_cmd(ar->wmi,
1452 idx,
1453 mgmt->id,
1454 mgmt->freq,
1455 mgmt->wait,
1456 mgmt->buf,
1457 mgmt->len,
1458 mgmt->no_cck);
1459
1460 kfree(mgmt);
1461 spin_lock_bh(&conn->psq_lock);
1462 }
1463 conn->mgmt_psq_len = 0;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301464 while ((skbuff = skb_dequeue(&conn->psq))) {
1465 spin_unlock_bh(&conn->psq_lock);
1466 ath6kl_data_tx(skbuff, vif->ndev);
1467 spin_lock_bh(&conn->psq_lock);
1468 }
1469
1470 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1471 while ((skbuff = skb_dequeue(&conn->apsdq))) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001472 spin_unlock_bh(&conn->psq_lock);
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301473 ath6kl_data_tx(skbuff, vif->ndev);
Kalle Valobdcd8172011-07-18 00:22:30 +03001474 spin_lock_bh(&conn->psq_lock);
1475 }
1476 spin_unlock_bh(&conn->psq_lock);
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301477
1478 if (!is_apsdq_empty)
1479 ath6kl_wmi_set_apsd_bfrd_traf(
1480 ar->wmi,
1481 vif->fw_vif_idx,
1482 conn->aid, 0, 0);
1483
Kalle Valobdcd8172011-07-18 00:22:30 +03001484 /* Clear the PVB for this STA */
Vasanthakumar Thiagarajan334234b2011-10-25 19:34:12 +05301485 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1486 conn->aid, 0);
Kalle Valobdcd8172011-07-18 00:22:30 +03001487 }
1488 }
1489
1490 /* drop NULL data frames here */
1491 if ((packet->act_len < min_hdr_len) ||
1492 (packet->act_len >
1493 WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1494 dev_kfree_skb(skb);
1495 return;
1496 }
1497 }
1498
1499 is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1500 tid = wmi_data_hdr_get_up(dhdr);
1501 seq_no = wmi_data_hdr_get_seqno(dhdr);
1502 meta_type = wmi_data_hdr_get_meta(dhdr);
1503 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
Kalle Valo8bd5bca2012-03-25 17:15:25 +03001504 pad_before_data_start =
1505 (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
1506 & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
1507
Vasanthakumar Thiagarajan594a0bc2011-08-14 17:08:35 +05301508 skb_pull(skb, sizeof(struct wmi_data_hdr));
Kalle Valobdcd8172011-07-18 00:22:30 +03001509
1510 switch (meta_type) {
1511 case WMI_META_VERSION_1:
1512 skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1513 break;
1514 case WMI_META_VERSION_2:
1515 meta = (struct wmi_rx_meta_v2 *) skb->data;
1516 if (meta->csum_flags & 0x1) {
1517 skb->ip_summed = CHECKSUM_COMPLETE;
1518 skb->csum = (__force __wsum) meta->csum;
1519 }
1520 skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1521 break;
1522 default:
1523 break;
1524 }
1525
Kalle Valo8bd5bca2012-03-25 17:15:25 +03001526 skb_pull(skb, pad_before_data_start);
1527
Kalle Valobdcd8172011-07-18 00:22:30 +03001528 if (dot11_hdr)
1529 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1530 else if (!is_amsdu)
1531 status = ath6kl_wmi_dot3_2_dix(skb);
1532
1533 if (status) {
1534 /*
1535 * Drop frames that could not be processed (lack of
1536 * memory, etc.)
1537 */
1538 dev_kfree_skb(skb);
1539 return;
1540 }
1541
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301542 if (!(vif->ndev->flags & IFF_UP)) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001543 dev_kfree_skb(skb);
1544 return;
1545 }
1546
Vasanthakumar Thiagarajanf5938f22011-10-25 19:34:03 +05301547 if (vif->nw_type == AP_NETWORK) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001548 datap = (struct ethhdr *) skb->data;
1549 if (is_multicast_ether_addr(datap->h_dest))
1550 /*
1551 * Bcast/Mcast frames should be sent to the
1552 * OS stack as well as on the air.
1553 */
1554 skb1 = skb_copy(skb, GFP_ATOMIC);
1555 else {
1556 /*
1557 * Search for a connected STA with dstMac
1558 * as the Mac address. If found send the
1559 * frame to it on the air else send the
1560 * frame up the stack.
1561 */
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301562 conn = ath6kl_find_sta(vif, datap->h_dest);
Kalle Valobdcd8172011-07-18 00:22:30 +03001563
1564 if (conn && ar->intra_bss) {
1565 skb1 = skb;
1566 skb = NULL;
1567 } else if (conn && !ar->intra_bss) {
1568 dev_kfree_skb(skb);
1569 skb = NULL;
1570 }
1571 }
1572 if (skb1)
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301573 ath6kl_data_tx(skb1, vif->ndev);
Kalle Vaload3f78b2011-10-06 14:32:32 +03001574
1575 if (skb == NULL) {
1576 /* nothing to deliver up the stack */
1577 return;
1578 }
Kalle Valobdcd8172011-07-18 00:22:30 +03001579 }

        datap = (struct ethhdr *) skb->data;

        if (is_unicast_ether_addr(datap->h_dest)) {
                if (vif->nw_type == AP_NETWORK) {
                        conn = ath6kl_find_sta(vif, datap->h_source);
                        if (!conn) {
                                /* frame from an unknown STA, drop it */
                                dev_kfree_skb(skb);
                                return;
                        }
                        aggr_conn = conn->aggr_conn;
                } else {
                        aggr_conn = vif->aggr_cntxt->aggr_conn;
                }

                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                                          is_amsdu, skb)) {
                        /* aggregation code will handle the skb */
                        return;
                }
        } else if (!is_broadcast_ether_addr(datap->h_dest)) {
                vif->net_stats.multicast++;
        }

        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

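/*
 * Rx aggregation reorder timer. When it fires, frames that have waited
 * in a hold queue longer than AGGR_RX_TIMEOUT are flushed up the stack,
 * and the timer is re-armed if any TID still holds buffered frames.
 */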
static void aggr_timeout(unsigned long arg)
{
        u8 i, j;
        struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                stats = &aggr_conn->stat[i];

                if (!rxtid->aggr || !rxtid->timer_mon)
                        continue;

                stats->num_timeouts++;
                ath6kl_dbg(ATH6KL_DBG_AGGR,
                           "aggr timeout (st %d end %d)\n",
                           rxtid->seq_next,
                           ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
                            ATH6KL_MAX_SEQ_NO));
                aggr_deque_frms(aggr_conn, i, 0, 0);
        }

        aggr_conn->timer_scheduled = false;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];

                if (rxtid->aggr && rxtid->hold_q) {
                        spin_lock_bh(&rxtid->lock);
                        for (j = 0; j < rxtid->hold_q_sz; j++) {
                                if (rxtid->hold_q[j].skb) {
                                        aggr_conn->timer_scheduled = true;
                                        rxtid->timer_mon = true;
                                        break;
                                }
                        }
                        spin_unlock_bh(&rxtid->lock);

                        if (j >= rxtid->hold_q_sz)
                                rxtid->timer_mon = false;
                }
        }

        if (aggr_conn->timer_scheduled)
                mod_timer(&aggr_conn->timer,
                          jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

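/*
 * Tear down the rx aggregation state of a single TID: flush whatever
 * is still buffered for in-order delivery, free the hold queue and
 * clear the statistics.
 */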
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        if (!aggr_conn || tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];
        stats = &aggr_conn->stat[tid];

        if (rxtid->aggr)
                aggr_deque_frms(aggr_conn, tid, 0, 0);

        rxtid->aggr = false;
        rxtid->timer_mon = false;
        rxtid->win_sz = 0;
        rxtid->seq_next = 0;
        rxtid->hold_q_sz = 0;

        kfree(rxtid->hold_q);
        rxtid->hold_q = NULL;

        memset(stats, 0, sizeof(struct rxtid_stats));
}

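/*
 * ADDBA request event from the target: set up the reorder window for
 * the given TID. In AP mode tid_mux also carries the station AID, so
 * the per-station aggregation state is looked up first.
 */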
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                             u8 win_sz)
{
        struct ath6kl_sta *sta;
        struct aggr_info_conn *aggr_conn = NULL;
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        u16 hold_q_size;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
        }

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];
        stats = &aggr_conn->stat[tid];

        if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
                ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
                           __func__, win_sz, tid);

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);

        rxtid->seq_next = seq_no;
        hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
        rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
        if (!rxtid->hold_q)
                return;

        rxtid->win_sz = win_sz;
        rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
        if (!skb_queue_empty(&rxtid->q))
                return;

        rxtid->aggr = true;
}

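/*
 * Initialize the per-connection aggregation state: the reorder timeout
 * timer plus a queue and lock for each TID.
 */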
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
                    struct aggr_info_conn *aggr_conn)
{
        struct rxtid *rxtid;
        u8 i;

        aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
        aggr_conn->dev = vif->ndev;
        init_timer(&aggr_conn->timer);
        aggr_conn->timer.function = aggr_timeout;
        aggr_conn->timer.data = (unsigned long) aggr_conn;
        aggr_conn->aggr_info = aggr_info;

        aggr_conn->timer_scheduled = false;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                rxtid->aggr = false;
                rxtid->timer_mon = false;
                skb_queue_head_init(&rxtid->q);
                spin_lock_init(&rxtid->lock);
        }
}

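/*
 * Allocate the per-vif aggregation context along with its default
 * connection state and the free buffer pool used for A-MSDU rx.
 */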
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
        struct aggr_info *p_aggr;

        p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
        if (!p_aggr) {
                ath6kl_err("failed to alloc memory for aggr_node\n");
                return NULL;
        }

        p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
        if (!p_aggr->aggr_conn) {
                ath6kl_err("failed to alloc memory for connection specific aggr info\n");
                kfree(p_aggr);
                return NULL;
        }

        aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

        skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
        ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

        return p_aggr;
}

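/*
 * DELBA event from the target: drop the reorder state of the given
 * TID, demuxing tid_mux the same way as in the ADDBA path.
 */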
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
        struct ath6kl_sta *sta;
        struct rxtid *rxtid;
        struct aggr_info_conn *aggr_conn = NULL;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
        }

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);
}

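/*
 * Cancel a pending reorder timeout and tear down the aggregation
 * state of every TID on this connection.
 */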
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
        u8 tid;

        if (!aggr_conn)
                return;

        if (aggr_conn->timer_scheduled) {
                del_timer(&aggr_conn->timer);
                aggr_conn->timer_scheduled = false;
        }

        for (tid = 0; tid < NUM_OF_TIDS; tid++)
                aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
        struct htc_packet *packet, *tmp_pkt;

        spin_lock_bh(&ar->lock);
        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                return;
        }

        list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
                                 list) {
                list_del(&packet->list);
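                /*
                 * Drop ar->lock while freeing: the entry is already
                 * unlinked from the queue.
                 */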
                spin_unlock_bh(&ar->lock);
                dev_kfree_skb(packet->pkt_cntxt);
                spin_lock_bh(&ar->lock);
        }

        spin_unlock_bh(&ar->lock);
}

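/*
 * Free an aggregation context allocated by aggr_init(), resetting any
 * remaining per-TID state and purging the A-MSDU free buffer pool.
 */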
void aggr_module_destroy(struct aggr_info *aggr_info)
{
        if (!aggr_info)
                return;

        aggr_reset_state(aggr_info->aggr_conn);
        skb_queue_purge(&aggr_info->rx_amsdu_freeq);
        kfree(aggr_info->aggr_conn);
        kfree(aggr_info);
}