/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>               /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>                /* qdf_mem_malloc */

#include <ieee80211.h>              /* IEEE80211_SEQ_MAX */

/* external interfaces */
#include <ol_txrx_api.h>            /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>        /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>       /* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>          /* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>      /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>       /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>  /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

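/*
 * Lookup table that rounds a block ack window size (0-64) up to the next
 * power of two; used by OL_RX_REORDER_ROUND_PWR2 to size the reorder array.
 */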
static char g_log2ceil[] = {
	1,                      /* 0 -> 1 */
	1,                      /* 1 -> 1 */
	2,                      /* 2 -> 2 */
	4, 4,                   /* 3-4 -> 4 */
	8, 8, 8, 8,             /* 5-8 -> 8 */
	16, 16, 16, 16, 16, 16, 16, 16,         /* 9-16 -> 16 */
	32, 32, 32, 32, 32, 32, 32, 32,
	32, 32, 32, 32, 32, 32, 32, 32,         /* 17-32 -> 32 */
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,         /* 33-64 -> 64 */
};

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_PTR_CHECK(ptr) /* no-op */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do { \
		if (tail_msdu) { \
			qdf_nbuf_set_next(tail_msdu, \
					  rx_reorder_array_elem->head); \
		} \
	} while (0)

/* functions called by txrx components */

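/**
 * ol_rx_reorder_init() - Initialize the rx reorder state for a TID
 * @rx_reorder: rx reorder state for the TID, within the peer object
 * @tid: TID the reorder state applies to
 *
 * Set up the default (no block ack) configuration: a single-element
 * reorder array that aliases the statically-allocated base element,
 * plus cleared defrag bookkeeping.
 */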
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

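/**
 * ol_rx_reorder_seq_num_check() - Detect replayed or late unicast MPDUs
 * @pdev: data physical device
 * @peer: peer that sent the MPDU
 * @tid: TID of the MPDU
 * @seq_num: sequence number of the MPDU
 *
 * Compare the new sequence number against the last one released for this
 * peer-TID; a delta in the upper half of the sequence space indicates a
 * replayed or late-arriving frame.
 *
 * Return: htt_rx_status_err_replay if the frame should be dropped,
 * htt_rx_status_ok otherwise.
 */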
static enum htt_rx_status
ol_rx_reorder_seq_num_check(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned tid, unsigned seq_num)
{
	unsigned seq_num_delta;

	/*
	 * Don't check the new seq_num against last_seq
	 * if last_seq is not valid.
	 */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);        /* account for wraparound */

	if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
		return htt_rx_status_err_replay;
		/* or maybe htt_rx_status_err_dup */
	}
	return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Duplicate detection for multicast packets, and
 *			   duplicate detection plus out-of-order check for
 *			   unicast packets
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *	1) For multicast frames -- does duplicate detection.
 *	   A frame is considered a duplicate and dropped if it has the same
 *	   sequence number as the previously received frame and the retry
 *	   bit is set in the second frame.
 *	   A frame which is older than the last sequence number received
 *	   is not considered a duplicate but out-of-order. This function
 *	   does not perform an out-of-order check for multicast frames,
 *	   which is in keeping with the IEEE 802.11-2012 spec,
 *	   section 9.3.2.10.
 *	2) For unicast frames -- does duplicate detection and out-of-order
 *	   check, only for non-aggregation TIDs.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *	   htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid,
		    void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = false;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

	/* For mcast packets, only do dup-detection, not the reorder check */

	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* Invalid packet TID, expected only for HL */
		/* Pass the packet on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, frames are defined to be duplicates "ONLY"
		 * if they arrive in succession with the same sequence number
		 * and the latter one has the retry bit set. An older frame is
		 * considered out-of-order rather than a duplicate, and no
		 * out-of-order check is performed for multicast frames, as
		 * per discussions and the spec.
		 * Hence a "seq_num <= last_seq_num" check is not necessary.
		 */
		if (qdf_unlikely(retry &&
			(seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		} else {
			/*
			 * This is a multicast packet likely to be passed on.
			 * Set the mcast last seq number here.
			 * This is fairly accurate since:
			 * a) f/w sends multicast as separate PPDU/HTT messages
			 * b) mcast packets are not aggregated and hence single
			 * c) as a result of b), the flush / release bit is
			 *    always set on mcast packets, so they are likely
			 *    to be released immediately.
			 */
			peer->tids_mcast_last_seq[pkt_tid] = seq_num;
			return htt_rx_status_ok;
		}
	} else {
		return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
	}
}

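/**
 * ol_rx_reorder_store() - Buffer an MPDU's MSDU list in the reorder array
 * @pdev: data physical device
 * @peer: peer the MPDU was received from
 * @tid: TID of the MPDU
 * @idx: reorder index derived from the MPDU's sequence number
 * @head_msdu: first MSDU of the MPDU
 * @tail_msdu: last MSDU of the MPDU
 *
 * Append the MSDU list to the reorder slot for the given index so that it
 * can be released in order later.
 */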
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned tid,
		    unsigned idx, qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}

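/**
 * ol_rx_reorder_release() - Release a contiguous range of buffered MPDUs
 * @vdev: virtual device the frames belong to
 * @peer: peer the frames were received from
 * @tid: TID of the frames
 * @idx_start: first reorder slot to release (inclusive)
 * @idx_end: slot one past the last slot to release (exclusive)
 *
 * Chain together the MSDUs stored in the [idx_start, idx_end) reorder slots,
 * record the sequence number of the released batch, and hand the resulting
 * NULL-terminated netbuf list to the peer's rx_opt_proc function.
 */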
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned tid, unsigned idx_start, unsigned idx_end)
{
	unsigned idx;
	unsigned win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	OL_RX_REORDER_PTR_CHECK(head_msdu) {
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);
	}

	idx = (idx_start + 1);
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		OL_RX_REORDER_PTR_CHECK(rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
			NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	OL_RX_REORDER_PTR_CHECK(head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather
		 * than the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

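/**
 * ol_rx_reorder_flush() - Flush buffered MPDUs from the reorder array
 * @vdev: virtual device the frames belong to
 * @peer: peer the frames were received from
 * @tid: TID of the frames
 * @idx_start: first reorder slot to flush
 * @idx_end: slot one past the last slot to flush (0xffff = entire array)
 * @action: whether to deliver (htt_rx_flush_release) or discard the frames
 *
 * Unlike ol_rx_reorder_release, this handles the case where the block ack
 * window is being shifted or torn down, and can either deliver or free the
 * flushed frames.
 */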
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned tid,
		    unsigned idx_start,
		    unsigned idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in entirety because the block
		 * ack window has been shifted to a new position that does not
		 * overlap with the old position. (Or due to reception of a
		 * DELBA.)
		 * Thus, since the block ack window is essentially being reset,
		 * reset the "next release index".
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (head_msdu == NULL) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;
				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

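/**
 * ol_rx_reorder_first_hole() - Find the end of the first in-order run
 * @peer: peer whose reorder state is inspected
 * @tid: TID of the reorder state
 * @idx_end: filled with the index of the first slot of the second hole
 *
 * Skip past the initial hole and the contiguous present frames that follow
 * it; the returned index is exclusive, i.e. the first empty slot after
 * that run.
 */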
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned tid, unsigned *idx_end)
{
	unsigned win_sz, win_sz_mask;
	unsigned idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}

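/**
 * ol_rx_reorder_peer_cleanup() - Discard all reorder state for a peer
 * @vdev: virtual device the peer belongs to
 * @peer: peer being deleted or disassociated
 *
 * Flush and discard the buffered frames for every extended TID and remove
 * any pending reorder timeouts.
 */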
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

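/**
 * ol_rx_addba_handler() - Set up rx reordering for a new block ack session
 * @pdev: data physical device
 * @peer_id: ID of the peer that originated the ADDBA
 * @tid: TID covered by the block ack agreement
 * @win_sz: negotiated block ack window size (at most 64)
 * @start_seq_num: starting sequence number of the window
 * @failed: non-zero if the ADDBA negotiation failed
 *
 * Allocate a reorder array whose length is the window size rounded up to a
 * power of two, and initialize the next-release index from the starting
 * sequence number.
 */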
void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
	uint8_t round_pwr2_win_sz;
	unsigned array_size;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	if (pdev->cfg.host_addba) {
		ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
					  &peer->mac_addr.raw[0], tid, failed);
	}
	if (failed)
		return;

	peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
	rx_reorder = &peer->tids_rx_reorder[tid];

	TXRX_ASSERT2(win_sz <= 64);
	rx_reorder->win_sz = win_sz;
	round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
	array_size =
		round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
	rx_reorder->array = qdf_mem_malloc(array_size);
	TXRX_ASSERT1(rx_reorder->array);
	qdf_mem_set(rx_reorder->array, array_size, 0x0);

	rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
	rx_reorder->num_mpdus = 0;

	peer->tids_next_rel_idx[tid] =
		OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
				       rx_reorder->win_sz_mask);
}

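/**
 * ol_rx_delba_handler() - Tear down rx reordering for a block ack session
 * @pdev: data physical device
 * @peer_id: ID of the peer whose block ack agreement was deleted
 * @tid: TID covered by the deleted agreement
 *
 * Free the dynamically-allocated reorder array and return the TID to the
 * default single-element (window size 1) configuration.
 */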
void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	peer->tids_next_rel_idx[tid] = 0xffff;  /* invalid value */
	rx_reorder = &peer->tids_rx_reorder[tid];

	/* check that there really was a block ack agreement */
	TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
	/*
	 * Deallocate the old rx reorder array.
	 * The call to ol_rx_reorder_init below
	 * will reset rx_reorder->array to point to
	 * the single-element statically-allocated reorder array
	 * used for non block-ack cases.
	 */
	if (rx_reorder->array != &rx_reorder->base) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
			   "%s, delete reorder array, tid:%d\n",
			   __func__, tid);
		qdf_mem_free(rx_reorder->array);
	}

	/* set up the TID with default parameters (ARQ window size = 1) */
	ol_rx_reorder_init(rx_reorder, tid);
}

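/**
 * ol_rx_flush_handler() - Process an rx flush indication from the target
 * @pdev: data physical device
 * @peer_id: ID of the peer the flush applies to
 * @tid: TID the flush applies to
 * @idx_start: first reorder slot to flush
 * @idx_end: slot one past the last slot to flush
 * @action: whether to deliver or discard the flushed frames
 *
 * Fragmented MPDUs are routed to the defrag flush path; everything else is
 * handed to ol_rx_reorder_flush under the reorder timeout mutex.
 */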
void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming flush messages are sent separately for
			 * frags and for normal frames
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}
	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

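/**
 * ol_rx_pn_ind_handler() - Process a target PN check indication
 * @pdev: data physical device
 * @peer_id: ID of the peer the indication applies to
 * @tid: TID the indication applies to
 * @seq_num_start: start of the reorder index range covered (inclusive)
 * @seq_num_end: end of the reorder index range covered (exclusive)
 * @pn_ie_cnt: number of entries in the PN failure list
 * @pn_ie: list of reorder indices whose MPDUs failed the target's PN check
 *
 * Free and report (via ol_rx_err) the MPDUs that failed the packet number
 * check, then deliver the remaining buffered MPDUs to rx_opt_proc.
 */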
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     int seq_num_start,
		     int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int seq_num, i = 0;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				static uint32_t last_pncheck_print_time;
				/* no need to initialize - static variables
				 * are zero-initialized by C */
				int log_level;
				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					log_level = TXRX_PRINT_LEVEL_WARN;
				} else {
					log_level = TXRX_PRINT_LEVEL_INFO2;
				}
				TXRX_PRINT(log_level,
					   "Tgt PN check failed - TID %d, peer %p "
					   "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
					   " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
					   " new seq num = %d\n",
					   tid, peer,
					   peer->mac_addr.raw[0],
					   peer->mac_addr.raw[1],
					   peer->mac_addr.raw[2],
					   peer->mac_addr.raw[3],
					   peer->mac_addr.raw[4],
					   peer->mac_addr.raw[5], pn.pn128[1],
					   pn.pn128[0],
					   pn.pn128[0] & 0xffffffffffffULL,
					   htt_rx_mpdu_desc_seq_num(htt_pdev,
								    rx_desc));
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					else
						msdu = next_msdu;
				} while (1);

			} else {
				if (head_msdu == NULL) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}

#if defined(ENABLE_RX_REORDER_TRACE)

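/**
 * ol_rx_reorder_trace_attach() - Allocate the rx reorder trace log
 * @pdev: data physical device
 *
 * Allocate a circular trace buffer of 2^TXRX_RX_REORDER_TRACE_SIZE_LOG2
 * entries and mark every entry's seq_num as invalid (0xffff).
 *
 * Return: A_OK on success, A_NO_MEMORY if the allocation fails.
 */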
A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  " log array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  " count idx tid idx num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " %6lld %4d %3d %4d %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " %6lld %4d err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */