/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

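/*
 * In IBSS power-save mode each peer gets its own data endpoint so that
 * per-node pending traffic can be tracked. Multicast frames always go
 * out on ENDPOINT_2; unicast destinations are looked up in (or added
 * to) ar->node_map, and when all of ENDPOINT_2..ENDPOINT_5 are busy
 * the endpoints are handed out round-robin via ar->next_ep_id.
 */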
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

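/*
 * Decide what to do with a tx frame for a station using U-APSD: a frame
 * sent in response to a trigger only has its MORE/EOSP flags updated,
 * while a frame for a delivery-enabled AC of a sleeping station is
 * buffered on conn->apsdq. Returns true if the skb was queued and must
 * not be sent now by the caller.
 */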
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
					(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

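/*
 * Legacy (non-U-APSD) power-save handling: a PS-Poll response only has
 * its MORE flag updated, anything else destined to a sleeping station
 * is buffered on conn->psq. Returns true if the skb was queued.
 */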
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

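/*
 * AP mode power-save dispatcher: multicast frames are buffered on the
 * shared mcastpsq while any station sleeps, unicast frames go through
 * the U-APSD or legacy psq handlers above. Returns true when the skb
 * has been consumed (queued or dropped) and must not be transmitted
 * by the caller.
 */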
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}
	return ps_queued;
}

/* Tx functions */

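/*
 * Queue a control packet (typically a WMI command) directly on an HTC
 * endpoint. When the WMI control endpoint is already flagged full, no
 * cookie is allocated and the packet is dropped with -ENOMEM.
 */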
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

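/*
 * ndo_start_xmit handler for the data path: runs the AP power-save
 * buffering, adds the WMI data header (plus checksum-offload meta when
 * NETIF_F_IP_CSUM is in use), maps the frame to an AC/endpoint and
 * hands it to HTC. Always returns 0; on failure the skb is freed and
 * accounted as dropped.
 */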
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since we
		 * are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

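/*
 * HTC callback invoked when an endpoint's tx queue overflows. Packets
 * of lower priority streams may be dropped in favour of the highest
 * active stream; otherwise the netif queues are stopped until tx
 * completions drain the endpoint.
 */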
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

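/*
 * Drop the tx_pend reference a completed packet holds on its IBSS node
 * map entry and trim fully idle entries from the tail of the map.
 */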
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

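/*
 * HTC tx-completion callback: reaps every packet on packet_queue,
 * releases its cookie, updates per-vif stats and wakes the netif
 * queues of connected vifs once any flush has settled.
 */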
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

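/*
 * Top up the HTC rx buffer pool for an endpoint back to
 * ATH6KL_MAX_RX_BUFFERS, keeping each buffer's data pointer aligned to
 * a 4-byte boundary before handing the buffers to HTC.
 */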
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

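/*
 * Split a received A-MSDU into its individual 802.3 subframes, convert
 * each to DIX format and queue it on the TID's reorder output queue.
 * The original A-MSDU skb is always consumed.
 */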
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

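/*
 * Release in-order frames from the TID hold queue to the network stack.
 * With order == 0 everything up to seq_no (or the whole window when
 * seq_no is 0) is flushed, holes included; with order == 1 delivery
 * stops at the first missing frame.
 */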
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for seq_no space of 4095,
	 * then there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz;
	 * 2, 4, 8, 16 win_sz works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

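/*
 * Insert a received MPDU/A-MSDU into the block-ack reorder window for
 * its TID. Sequence numbers outside the current window first shift the
 * window (flushing frames that drop out of it); the frame is then
 * parked in hold_q and any contiguous run starting at seq_next is
 * delivered. Returns true when the aggregation code has consumed the
 * skb.
 */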
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

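/*
 * Main HTC rx-completion handler: control-endpoint traffic is passed to
 * WMI, data frames have their WMI/meta headers stripped and, in AP
 * mode, drive the per-station power-save state machine and intra-BSS
 * forwarding before delivery to the network stack or the rx
 * aggregation code.
 */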
Kalle Valobdcd8172011-07-18 00:22:30 +03001291void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1292{
1293 struct ath6kl *ar = target->dev->ar;
1294 struct sk_buff *skb = packet->pkt_cntxt;
1295 struct wmi_rx_meta_v2 *meta;
1296 struct wmi_data_hdr *dhdr;
1297 int min_hdr_len;
1298 u8 meta_type, dot11_hdr = 0;
Kalle Valo8bd5bca2012-03-25 17:15:25 +03001299 u8 pad_before_data_start;
Kalle Valobdcd8172011-07-18 00:22:30 +03001300 int status = packet->status;
1301 enum htc_endpoint_id ept = packet->endpoint;
1302 bool is_amsdu, prev_ps, ps_state = false;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301303 bool trig_state = false;
Kalle Valobdcd8172011-07-18 00:22:30 +03001304 struct ath6kl_sta *conn = NULL;
1305 struct sk_buff *skb1 = NULL;
1306 struct ethhdr *datap = NULL;
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301307 struct ath6kl_vif *vif;
Vasanthakumar Thiagarajan1d2a4452012-01-21 15:22:53 +05301308 struct aggr_info_conn *aggr_conn;
Kalle Valobdcd8172011-07-18 00:22:30 +03001309 u16 seq_no, offset;
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301310 u8 tid, if_idx;
Kalle Valobdcd8172011-07-18 00:22:30 +03001311
1312 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1313 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1314 __func__, ar, ept, skb, packet->buf,
1315 packet->act_len, status);
1316
1317 if (status || !(skb->data + HTC_HDR_LENGTH)) {
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301318 dev_kfree_skb(skb);
1319 return;
1320 }
1321
1322 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1323 skb_pull(skb, HTC_HDR_LENGTH);
1324
Vasanthakumar Thiagarajan81db48d2012-02-28 20:20:22 +05301325 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1326 skb->data, skb->len);
1327
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301328 if (ept == ar->ctrl_ep) {
Vasanthakumar Thiagarajan81db48d2012-02-28 20:20:22 +05301329 if (test_bit(WMI_ENABLED, &ar->flag)) {
1330 ath6kl_check_wow_status(ar);
1331 ath6kl_wmi_control_rx(ar->wmi, skb);
1332 return;
1333 }
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301334 if_idx =
1335 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1336 } else {
1337 if_idx =
1338 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1339 }
1340
1341 vif = ath6kl_get_vif_by_index(ar, if_idx);
1342 if (!vif) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001343 dev_kfree_skb(skb);
1344 return;
1345 }
1346
1347 /*
1348 * Take lock to protect buffer counts and adaptive power throughput
1349 * state.
1350 */
Vasanthakumar Thiagarajan478ac022011-10-25 19:34:19 +05301351 spin_lock_bh(&vif->if_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +03001352
Vasanthakumar Thiagarajanb95907a2011-10-25 19:34:11 +05301353 vif->net_stats.rx_packets++;
1354 vif->net_stats.rx_bytes += packet->act_len;
Kalle Valobdcd8172011-07-18 00:22:30 +03001355
Vasanthakumar Thiagarajan478ac022011-10-25 19:34:19 +05301356 spin_unlock_bh(&vif->if_lock);
Vasanthakumar Thiagarajan83dc5f22011-08-14 17:08:33 +05301357
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301358 skb->dev = vif->ndev;
Kalle Valobdcd8172011-07-18 00:22:30 +03001359
1360 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1361 if (EPPING_ALIGNMENT_PAD > 0)
1362 skb_pull(skb, EPPING_ALIGNMENT_PAD);
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301363 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
Kalle Valobdcd8172011-07-18 00:22:30 +03001364 return;
1365 }
1366
Raja Mania918fb32011-11-07 22:52:46 +02001367 ath6kl_check_wow_status(ar);
1368
Vasanthakumar Thiagarajan67f91782011-08-14 17:08:34 +05301369 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1370 sizeof(struct ath6kl_llc_snap_hdr);
Kalle Valobdcd8172011-07-18 00:22:30 +03001371
1372 dhdr = (struct wmi_data_hdr *) skb->data;
1373
1374 /*
1375 * In the case of AP mode we may receive NULL data frames
1376 * that do not have LLC hdr. They are 16 bytes in size.
1377 * Allow these frames in the AP mode.
1378 */
Vasanthakumar Thiagarajanf5938f22011-10-25 19:34:03 +05301379 if (vif->nw_type != AP_NETWORK &&
Kalle Valobdcd8172011-07-18 00:22:30 +03001380 ((packet->act_len < min_hdr_len) ||
1381 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1382 ath6kl_info("frame len is too short or too long\n");
Vasanthakumar Thiagarajanb95907a2011-10-25 19:34:11 +05301383 vif->net_stats.rx_errors++;
1384 vif->net_stats.rx_length_errors++;
Kalle Valobdcd8172011-07-18 00:22:30 +03001385 dev_kfree_skb(skb);
1386 return;
1387 }
1388
1389 /* Get the Power save state of the STA */
Vasanthakumar Thiagarajanf5938f22011-10-25 19:34:03 +05301390 if (vif->nw_type == AP_NETWORK) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001391 meta_type = wmi_data_hdr_get_meta(dhdr);
1392
1393 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1394 WMI_DATA_HDR_PS_MASK);
1395
1396 offset = sizeof(struct wmi_data_hdr);
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301397 trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
Kalle Valobdcd8172011-07-18 00:22:30 +03001398
1399 switch (meta_type) {
1400 case 0:
1401 break;
1402 case WMI_META_VERSION_1:
1403 offset += sizeof(struct wmi_rx_meta_v1);
1404 break;
1405 case WMI_META_VERSION_2:
1406 offset += sizeof(struct wmi_rx_meta_v2);
1407 break;
1408 default:
1409 break;
1410 }
1411
1412 datap = (struct ethhdr *) (skb->data + offset);
Vasanthakumar Thiagarajan6765d0a2011-10-25 19:34:17 +05301413 conn = ath6kl_find_sta(vif, datap->h_source);
Kalle Valobdcd8172011-07-18 00:22:30 +03001414
1415 if (!conn) {
1416 dev_kfree_skb(skb);
1417 return;
1418 }
1419
1420 /*
1421 * If there is a change in PS state of the STA,
1422 * take appropriate steps:
1423 *
1424 * 1. If Sleep-->Awake, flush the psq for the STA
1425 * Clear the PVB for the STA.
1426 * 2. If Awake-->Sleep, Starting queueing frames
1427 * the STA.
1428 */
1429 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1430
1431 if (ps_state)
1432 conn->sta_flags |= STA_PS_SLEEP;
1433 else
1434 conn->sta_flags &= ~STA_PS_SLEEP;
1435
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301436 /* Accept trigger only when the station is in sleep */
1437 if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1438 ath6kl_uapsd_trigger_frame_rx(vif, conn);
1439
Kalle Valobdcd8172011-07-18 00:22:30 +03001440 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1441 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1442 struct sk_buff *skbuff = NULL;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301443 bool is_apsdq_empty;
Naveen Gangadharand0ff7382012-02-08 17:51:36 -08001444 struct ath6kl_mgmt_buff *mgmt;
1445 u8 idx;
Kalle Valobdcd8172011-07-18 00:22:30 +03001446
1447 spin_lock_bh(&conn->psq_lock);
Naveen Gangadharand0ff7382012-02-08 17:51:36 -08001448 while (conn->mgmt_psq_len > 0) {
1449 mgmt = list_first_entry(
1450 &conn->mgmt_psq,
1451 struct ath6kl_mgmt_buff,
1452 list);
1453 list_del(&mgmt->list);
1454 conn->mgmt_psq_len--;
1455 spin_unlock_bh(&conn->psq_lock);
1456 idx = vif->fw_vif_idx;
1457
1458 ath6kl_wmi_send_mgmt_cmd(ar->wmi,
1459 idx,
1460 mgmt->id,
1461 mgmt->freq,
1462 mgmt->wait,
1463 mgmt->buf,
1464 mgmt->len,
1465 mgmt->no_cck);
1466
1467 kfree(mgmt);
1468 spin_lock_bh(&conn->psq_lock);
1469 }
1470 conn->mgmt_psq_len = 0;
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301471 while ((skbuff = skb_dequeue(&conn->psq))) {
1472 spin_unlock_bh(&conn->psq_lock);
1473 ath6kl_data_tx(skbuff, vif->ndev);
1474 spin_lock_bh(&conn->psq_lock);
1475 }
1476
1477 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1478 while ((skbuff = skb_dequeue(&conn->apsdq))) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001479 spin_unlock_bh(&conn->psq_lock);
Vasanthakumar Thiagarajan28ae58d2011-10-25 19:34:14 +05301480 ath6kl_data_tx(skbuff, vif->ndev);
Kalle Valobdcd8172011-07-18 00:22:30 +03001481 spin_lock_bh(&conn->psq_lock);
1482 }
1483 spin_unlock_bh(&conn->psq_lock);
Thirumalai Pachamuthuc1762a32012-01-12 18:21:39 +05301484
1485 if (!is_apsdq_empty)
1486 ath6kl_wmi_set_apsd_bfrd_traf(
1487 ar->wmi,
1488 vif->fw_vif_idx,
1489 conn->aid, 0, 0);
1490
Kalle Valobdcd8172011-07-18 00:22:30 +03001491 /* Clear the PVB for this STA */
Vasanthakumar Thiagarajan334234b2011-10-25 19:34:12 +05301492 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1493 conn->aid, 0);
Kalle Valobdcd8172011-07-18 00:22:30 +03001494 }
1495 }
1496
1497 /* drop NULL data frames here */
1498 if ((packet->act_len < min_hdr_len) ||
1499 (packet->act_len >
1500 WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1501 dev_kfree_skb(skb);
1502 return;
1503 }
1504 }
1505
	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest)) {
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		} else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->net_stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

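/*
 * Reorder-timer callback: flush the frames of every TID that is being
 * aggregated and monitored, then re-arm the timer if any hold queue
 * still has frames pending.
 */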
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

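	/*
	 * Check whether any TID still has frames parked in its hold
	 * queue; if so, keep monitoring it and re-arm the timer below.
	 */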
	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

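/*
 * Tear down the rx aggregation state of one TID: flush any frames still
 * queued for reordering, free the hold queue and clear the statistics.
 */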
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

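/*
 * An ADDBA request event (re)initializes the rx reorder state of the
 * given TID: look up the per-station (AP mode) or per-vif aggregation
 * context, size the hold queue from the advertised window and start
 * reordering at seq_no.
 */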
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

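/*
 * Set up a per-connection aggregation context: default aggregation size,
 * reorder timeout timer and the per-TID queues and locks.
 */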
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

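/*
 * Allocate the per-vif aggregation context along with its connection
 * context and the free-buffer pool used for A-MSDU rx.
 */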
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

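/*
 * A DELBA request event drops the rx reorder state of the addressed TID
 * on the matching aggregation context.
 */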
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

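/*
 * Cancel a pending reorder timeout and tear down the rx aggregation
 * state of every TID on this connection.
 */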
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

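/* Free an aggregation context allocated by aggr_init() */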
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}