blob: ce1206aff5e5147db9881309017c9b957712bfad [file] [log] [blame]
Vladimir Kondratiev02525a72014-08-06 10:31:51 +03001/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Vladimir Kondratievb4490f42014-02-27 16:20:44 +020017#include "wil6210.h"
18#include "txrx.h"
19
20#define SEQ_MODULO 0x1000
21#define SEQ_MASK 0xfff
22
23static inline int seq_less(u16 sq1, u16 sq2)
24{
25 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
26}
27
28static inline u16 seq_inc(u16 sq)
29{
30 return (sq + 1) & SEQ_MASK;
31}
32
33static inline u16 seq_sub(u16 sq1, u16 sq2)
34{
35 return (sq1 - sq2) & SEQ_MASK;
36}
37
38static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
39{
40 return seq_sub(seq, r->ssn) % r->buf_size;
41}
42
/* Deliver the frame stored at @index (if any) up the network stack and
 * advance the reorder window head by one sequence number.
 * The head is advanced even when the slot is empty, so callers can walk
 * the window past holes.
 */
static void wil_release_reorder_frame(struct wil6210_priv *wil,
				      struct wil_tid_ampdu_rx *r,
				      int index)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct sk_buff *skb = r->reorder_buf[index];

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	r->stored_mpdu_num--;
	r->reorder_buf[index] = NULL;
	wil_netif_rx_any(skb, ndev);

no_frame:
	r->head_seq_num = seq_inc(r->head_seq_num);
}
61
/* Release buffered frames with sequence numbers up to (not including)
 * @hseq to the stack, then set the window head to @hseq.
 */
static void wil_release_reorder_frames(struct wil6210_priv *wil,
				       struct wil_tid_ampdu_rx *r,
				       u16 hseq)
{
	int index;

	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e it is always true
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
		index = reorder_index(r, r->head_seq_num);
		wil_release_reorder_frame(wil, r, index);
	}
	/* loop may have stopped early once no frames remain stored;
	 * force the head to the requested position
	 */
	r->head_seq_num = hseq;
}
80
/* Starting at the window head, deliver each consecutively-stored frame
 * to the stack; stop at the first empty slot (a still-missing frame).
 */
static void wil_reorder_release(struct wil6210_priv *wil,
				struct wil_tid_ampdu_rx *r)
{
	int index = reorder_index(r, r->head_seq_num);

	while (r->reorder_buf[index]) {
		/* releasing advances r->head_seq_num, so recompute index */
		wil_release_reorder_frame(wil, r, index);
		index = reorder_index(r, r->head_seq_num);
	}
}
91
92void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
93{
94 struct net_device *ndev = wil_to_ndev(wil);
95 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
96 int tid = wil_rxdesc_tid(d);
97 int cid = wil_rxdesc_cid(d);
98 int mid = wil_rxdesc_mid(d);
99 u16 seq = wil_rxdesc_seq(d);
100 struct wil_sta_info *sta = &wil->sta[cid];
Dedy Lanskyec81b5a2014-09-10 16:34:42 +0300101 struct wil_tid_ampdu_rx *r;
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200102 u16 hseq;
103 int index;
Dedy Lanskyec81b5a2014-09-10 16:34:42 +0300104 unsigned long flags;
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200105
106 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
107 mid, cid, tid, seq);
108
Dedy Lanskyec81b5a2014-09-10 16:34:42 +0300109 spin_lock_irqsave(&sta->tid_rx_lock, flags);
110
111 r = sta->tid_rx[tid];
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200112 if (!r) {
Dedy Lanskyec81b5a2014-09-10 16:34:42 +0300113 spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200114 wil_netif_rx_any(skb, ndev);
115 return;
116 }
117
118 hseq = r->head_seq_num;
119
Vladimir Kondratievc888cdd2014-04-08 11:36:18 +0300120 /** Due to the race between WMI events, where BACK establishment
121 * reported, and data Rx, few packets may be pass up before reorder
122 * buffer get allocated. Catch up by pretending SSN is what we
123 * see in the 1-st Rx packet
124 */
125 if (r->first_time) {
126 r->first_time = false;
127 if (seq != r->head_seq_num) {
128 wil_err(wil, "Error: 1-st frame with wrong sequence"
129 " %d, should be %d. Fixing...\n", seq,
130 r->head_seq_num);
131 r->head_seq_num = seq;
132 r->ssn = seq;
133 }
134 }
135
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200136 /* frame with out of date sequence number */
137 if (seq_less(seq, r->head_seq_num)) {
Vladimir Kondratievd5b1c322014-06-16 19:37:07 +0300138 r->ssn_last_drop = seq;
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200139 dev_kfree_skb(skb);
140 goto out;
141 }
142
143 /*
144 * If frame the sequence number exceeds our buffering window
145 * size release some previous frames to make room for this one.
146 */
147 if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
148 hseq = seq_inc(seq_sub(seq, r->buf_size));
149 /* release stored frames up to new head to stack */
150 wil_release_reorder_frames(wil, r, hseq);
151 }
152
153 /* Now the new frame is always in the range of the reordering buffer */
154
155 index = reorder_index(r, seq);
156
157 /* check if we already stored this frame */
158 if (r->reorder_buf[index]) {
159 dev_kfree_skb(skb);
160 goto out;
161 }
162
163 /*
164 * If the current MPDU is in the right order and nothing else
165 * is stored we can process it directly, no need to buffer it.
166 * If it is first but there's something stored, we may be able
167 * to release frames after this one.
168 */
169 if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
170 r->head_seq_num = seq_inc(r->head_seq_num);
171 wil_netif_rx_any(skb, ndev);
172 goto out;
173 }
174
175 /* put the frame in the reordering buffer */
176 r->reorder_buf[index] = skb;
177 r->reorder_time[index] = jiffies;
178 r->stored_mpdu_num++;
179 wil_reorder_release(wil, r);
180
181out:
Dedy Lanskyec81b5a2014-09-10 16:34:42 +0300182 spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200183}
184
185struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
186 int size, u16 ssn)
187{
188 struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
Vladimir Kondratiev8fe59622014-09-10 16:34:34 +0300189
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200190 if (!r)
191 return NULL;
192
193 r->reorder_buf =
194 kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
195 r->reorder_time =
196 kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
197 if (!r->reorder_buf || !r->reorder_time) {
198 kfree(r->reorder_buf);
199 kfree(r->reorder_time);
200 kfree(r);
201 return NULL;
202 }
203
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200204 r->ssn = ssn;
205 r->head_seq_num = ssn;
206 r->buf_size = size;
207 r->stored_mpdu_num = 0;
Vladimir Kondratievc888cdd2014-04-08 11:36:18 +0300208 r->first_time = true;
Vladimir Kondratievb4490f42014-02-27 16:20:44 +0200209 return r;
210}
211
/* Free reorder context @r, first flushing any frames still buffered
 * in the window (head .. head + buf_size) up the stack.
 * Safe to call with @r == NULL.
 */
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
{
	if (!r)
		return;
	wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
	kfree(r->reorder_buf);
	kfree(r->reorder_time);
	kfree(r);
}
Vladimir Kondratiev32772132014-12-23 09:47:03 +0200222
223/* ADDBA processing */
224static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
225{
226 u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
227 (mtu_max + WIL_MAX_MPDU_OVERHEAD));
228
229 if (!req_agg_wsize)
230 return max_agg_size;
231
232 return min(max_agg_size, req_agg_wsize);
233}
234
/* Block Ack - Rx side (recipient) */
236int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
237 u8 dialog_token, __le16 ba_param_set,
238 __le16 ba_timeout, __le16 ba_seq_ctrl)
239{
240 struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
241
242 if (!req)
243 return -ENOMEM;
244
245 req->cidxtid = cidxtid;
246 req->dialog_token = dialog_token;
247 req->ba_param_set = le16_to_cpu(ba_param_set);
248 req->ba_timeout = le16_to_cpu(ba_timeout);
249 req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
250
251 mutex_lock(&wil->back_rx_mutex);
252 list_add_tail(&req->list, &wil->back_rx_pending);
253 mutex_unlock(&wil->back_rx_mutex);
254
255 queue_work(wil->wq_service, &wil->back_rx_worker);
256
257 return 0;
258}
259
/* Process one queued ADDBA request (recipient side): validate it, send
 * the WMI ADDBA response, and on success (re)build the Rx reorder
 * context for this CID/TID. Runs from back_rx_worker context.
 */
static void wil_back_rx_handle(struct wil6210_priv *wil,
			       struct wil_back_rx *req)
{
	struct wil_sta_info *sta;
	u8 cid, tid;
	u16 agg_wsize = 0;
	/* ba_param_set layout:
	 * bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
	bool agg_amsdu = !!(req->ba_param_set & BIT(0));
	int ba_policy = req->ba_param_set & BIT(1);
	u16 agg_timeout = req->ba_timeout;
	u16 status = WLAN_STATUS_SUCCESS;
	unsigned long flags;
	int rc;

	parse_cidxtid(req->cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		return;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		return;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d\n",
		    cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token);

	/* apply policies */
	if (ba_policy) {
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	}
	if (status == WLAN_STATUS_SUCCESS)
		agg_wsize = wil_agg_size(wil, req_agg_wsize);

	/* respond to the peer even when rejecting (status != SUCCESS) */
	rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
			       agg_amsdu, agg_wsize, agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS))
		return;

	/* apply */
	spin_lock_irqsave(&sta->tid_rx_lock, flags);

	/* replace any existing reorder context for this TID;
	 * ba_seq_ctrl >> 4 extracts the starting sequence number (SSN)
	 * from the sequence-control field
	 */
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = wil_tid_ampdu_rx_alloc(wil, agg_wsize,
						  req->ba_seq_ctrl >> 4);

	spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
}
320
321void wil_back_rx_flush(struct wil6210_priv *wil)
322{
323 struct wil_back_rx *evt, *t;
324
325 wil_dbg_misc(wil, "%s()\n", __func__);
326
327 mutex_lock(&wil->back_rx_mutex);
328
329 list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
330 list_del(&evt->list);
331 kfree(evt);
332 }
333
334 mutex_unlock(&wil->back_rx_mutex);
335}
336
337/* Retrieve next ADDBA request from the pending list */
338static struct list_head *next_back_rx(struct wil6210_priv *wil)
339{
340 struct list_head *ret = NULL;
341
342 mutex_lock(&wil->back_rx_mutex);
343
344 if (!list_empty(&wil->back_rx_pending)) {
345 ret = wil->back_rx_pending.next;
346 list_del(ret);
347 }
348
349 mutex_unlock(&wil->back_rx_mutex);
350
351 return ret;
352}
353
354void wil_back_rx_worker(struct work_struct *work)
355{
356 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
357 back_rx_worker);
358 struct wil_back_rx *evt;
359 struct list_head *lh;
360
361 while ((lh = next_back_rx(wil)) != NULL) {
362 evt = list_entry(lh, struct wil_back_rx, list);
363
364 wil_back_rx_handle(wil, evt);
365 kfree(evt);
366 }
367}
Vladimir Kondratiev3a124ed2014-12-23 09:47:04 +0200368
369/* BACK - Tx (originator) side */
/* Process one queued ADDBA request on the originator (Tx) side:
 * issue the WMI ADDBA command for the vring unless a session is
 * already established or establishment is already in flight.
 */
static void wil_back_tx_handle(struct wil6210_priv *wil,
			       struct wil_back_tx *req)
{
	struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
	int rc;

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
			     req->ringid);
		return;
	}
	if (txdata->agg_wsize) {
		wil_dbg_misc(wil,
			     "ADDBA for vring[%d] already established wsize %d\n",
			     req->ringid, txdata->agg_wsize);
		return;
	}
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
	if (rc)
		/* roll back the flag so a later request can retry */
		txdata->addba_in_progress = false;
}
392
393static struct list_head *next_back_tx(struct wil6210_priv *wil)
394{
395 struct list_head *ret = NULL;
396
397 mutex_lock(&wil->back_tx_mutex);
398
399 if (!list_empty(&wil->back_tx_pending)) {
400 ret = wil->back_tx_pending.next;
401 list_del(ret);
402 }
403
404 mutex_unlock(&wil->back_tx_mutex);
405
406 return ret;
407}
408
409void wil_back_tx_worker(struct work_struct *work)
410{
411 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
412 back_tx_worker);
413 struct wil_back_tx *evt;
414 struct list_head *lh;
415
416 while ((lh = next_back_tx(wil)) != NULL) {
417 evt = list_entry(lh, struct wil_back_tx, list);
418
419 wil_back_tx_handle(wil, evt);
420 kfree(evt);
421 }
422}
423
424void wil_back_tx_flush(struct wil6210_priv *wil)
425{
426 struct wil_back_tx *evt, *t;
427
428 wil_dbg_misc(wil, "%s()\n", __func__);
429
430 mutex_lock(&wil->back_tx_mutex);
431
432 list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
433 list_del(&evt->list);
434 kfree(evt);
435 }
436
437 mutex_unlock(&wil->back_tx_mutex);
438}
439
440int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid)
441{
442 struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
443
444 if (!req)
445 return -ENOMEM;
446
447 req->ringid = ringid;
448 req->agg_wsize = wil_agg_size(wil, 0);
449 req->agg_timeout = 0;
450
451 mutex_lock(&wil->back_tx_mutex);
452 list_add_tail(&req->list, &wil->back_tx_pending);
453 mutex_unlock(&wil->back_tx_mutex);
454
455 queue_work(wil->wq_service, &wil->back_tx_worker);
456
457 return 0;
458}