/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>               /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>                /* qdf_mem_malloc */

#include <ieee80211.h>              /* IEEE80211_SEQ_MAX */

/* external interfaces */
#include <ol_txrx_api.h>            /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>        /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>       /* ol_ctrl_rx_addba_complete */
#include <ol_htt_rx_api.h>          /* htt_rx_desc_frame_free */
#include <ol_ctrl_txrx_api.h>       /* ol_rx_err */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>      /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>       /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>  /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

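/*
 * Lookup table backing OL_RX_REORDER_ROUND_PWR2(): index by a block-ack
 * window size (0..64) to get the rx reorder array size, rounded up to the
 * next power of two so that a simple bitmask can be used for index
 * wraparound.  For example, OL_RX_REORDER_ROUND_PWR2(50) yields 64, and
 * ol_rx_addba_handler() then uses (64 - 1) as the window size mask.
 */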
static char g_log2ceil[] = {
        1,  /* 0 -> 1 */
        1,  /* 1 -> 1 */
        2,  /* 2 -> 2 */
        4, 4,  /* 3-4 -> 4 */
        8, 8, 8, 8,  /* 5-8 -> 8 */
        16, 16, 16, 16, 16, 16, 16, 16,  /* 9-16 -> 16 */
        32, 32, 32, 32, 32, 32, 32, 32,
        32, 32, 32, 32, 32, 32, 32, 32,  /* 17-32 -> 32 */
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,  /* 33-64 -> 64 */
};

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
        do { \
                if (tail_msdu) { \
                        qdf_nbuf_set_next(tail_msdu, \
                                          rx_reorder_array_elem->head); \
                } \
        } while (0)

/* functions called by txrx components */

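/**
 * ol_rx_reorder_init() - Initialize a TID's rx reorder state to the default
 *                        (no block-ack) configuration.
 * @rx_reorder: per-TID rx reorder state to initialize
 * @tid: traffic identifier this state belongs to
 *
 * Sets up a single-slot reorder window (win_sz = 1, mask = 0) backed by the
 * statically allocated base element, and clears the defrag timeout and
 * defrag waitlist linkage.  ol_rx_addba_handler() replaces this with a
 * dynamically allocated array when a block-ack agreement is established.
 */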
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
        rx_reorder->win_sz = 1;
        rx_reorder->win_sz_mask = 0;
        rx_reorder->array = &rx_reorder->base;
        rx_reorder->base.head = rx_reorder->base.tail = NULL;
        rx_reorder->tid = tid;
        rx_reorder->defrag_timeout_ms = 0;

        rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
        rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

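/**
 * ol_rx_reorder_seq_num_check() - Check a unicast MPDU's sequence number
 *                                 against the last released sequence number
 *                                 for the TID.
 * @pdev: data physical device
 * @peer: peer that sent the MPDU
 * @tid: traffic identifier of the MPDU
 * @seq_num: 802.11 sequence number of the new MPDU
 *
 * Return: htt_rx_status_err_replay if the new sequence number is at or
 * behind the last released one (a duplicate or a late-arriving frame),
 * htt_rx_status_ok otherwise, or if last_seq is not yet valid.
 */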
static enum htt_rx_status
ol_rx_reorder_seq_num_check(
        struct ol_txrx_pdev_t *pdev,
        struct ol_txrx_peer_t *peer,
        unsigned tid, unsigned seq_num)
{
        unsigned seq_num_delta;

        /* don't check the new seq_num against last_seq
           if last_seq is not valid */
        if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
                return htt_rx_status_ok;

        /*
         * For duplicate detection, it might be helpful to also check
         * whether the retry bit is set or not - a strict duplicate packet
         * should be the one with the retry bit set.
         * However, since many implementations do not set the retry bit,
         * and since this same function is also used for filtering out
         * late-arriving frames (frames that arrive after their rx reorder
         * timeout has expired) which are not retries, don't bother checking
         * the retry bit for now.
         */
        /* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
        seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
                (IEEE80211_SEQ_MAX - 1);        /* account for wraparound */

        if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
                return htt_rx_status_err_replay;
                /* or maybe htt_rx_status_err_dup */
        }
        return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Do duplicate detection for multicast packets, and
 *                         duplicate plus out-of-order detection for unicast
 *                         packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *      1) For Multicast Frames -- does duplicate detection
 *              A frame is considered duplicate & dropped if it has a
 *              sequence number which is received twice in succession and
 *              with the retry bit set in the second case.
 *              A frame which is older than the last sequence number received
 *              is not considered duplicate but out-of-order. This function
 *              does not perform an out-of-order check for multicast frames,
 *              which is in keeping with the 802.11 2012 spec
 *              section 9.3.2.10.
 *      2) For Unicast Frames -- does duplicate detection & out-of-order
 *              check only for non-aggregation tids.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *         htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    uint8_t tid,
                    void *rx_mpdu_desc)
{
        uint16_t pkt_tid = 0xffff;
        uint16_t seq_num = IEEE80211_SEQ_MAX;
        bool retry = false;

        seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

        /* For mcast packets, only do dup-detection, not the re-order check */

        if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

                pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

                /* Invalid packet TID, expected only for HL */
                /* Pass the packet on */
                if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
                        return htt_rx_status_ok;

                retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

                /*
                 * At this point, we define frames to be duplicate if they
                 * arrive "ONLY" in succession with the same sequence number
                 * and the last one has the retry bit set. For an older
                 * frame, we consider that as an out-of-order frame, and
                 * hence do not perform the dup-detection or out-of-order
                 * check for multicast frames, as per discussions & spec.
                 * Hence the "seq_num <= last_seq_num" check is not necessary.
                 */
                if (qdf_unlikely(retry &&
                    (seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
                        /* drop mcast */
                        TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
                        return htt_rx_status_err_replay;
                } else {
                        /*
                         * This is a multicast packet likely to be passed on.
                         * Set the mcast last seq number here.
                         * This is fairly accurate since:
                         * a) f/w sends multicast as separate PPDU/HTT
                         *    messages
                         * b) mcast packets are not aggregated & hence single
                         * c) as a result of b), the flush / release bit is
                         *    always set on mcast packets, so they are likely
                         *    to be released immediately.
                         */
                        peer->tids_mcast_last_seq[pkt_tid] = seq_num;
                        return htt_rx_status_ok;
                }
        } else {
                return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
        }
}

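/**
 * ol_rx_reorder_store() - Store an MSDU list in the rx reorder array slot
 *                         that corresponds to its reorder index.
 * @pdev: data physical device
 * @peer: peer that sent the MPDU
 * @tid: traffic identifier of the MPDU
 * @idx: reorder index reported by the target for this MPDU
 * @head_msdu: first MSDU of the MPDU being stored
 * @tail_msdu: last MSDU of the MPDU being stored
 *
 * The index is masked down into the block-ack window; if the slot already
 * holds MSDUs, the new list is chained onto its tail, otherwise the slot's
 * head is set and the per-TID MPDU count is incremented.
 */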
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned tid,
                    unsigned idx, qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

        idx &= peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
        } else {
                rx_reorder_array_elem->head = head_msdu;
                OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
        }
        rx_reorder_array_elem->tail = tail_msdu;
}

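/**
 * ol_rx_reorder_release() - Release the buffered MPDUs in the reorder index
 *                           range [idx_start, idx_end) for a TID.
 * @vdev: virtual device the frames belong to
 * @peer: peer that sent the frames
 * @tid: traffic identifier of the frames
 * @idx_start: first reorder index to release
 * @idx_end: index one past the last slot to release
 *
 * Chains the MSDU lists of the released slots together, records the
 * sequence number of the first released MPDU as last_seq for late-arrival
 * filtering, hands the NULL-terminated list to the peer's rx_opt_proc, and
 * clears the released slots.  Also advances tids_next_rel_idx and removes
 * any host rx reorder timeout for the TID.
 */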
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
                      struct ol_txrx_peer_t *peer,
                      unsigned tid, unsigned idx_start, unsigned idx_end)
{
        unsigned idx;
        unsigned win_sz, win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu;
        qdf_nbuf_t tail_msdu;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* may get reset below */
        peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

        head_msdu = rx_reorder_array_elem->head;
        tail_msdu = rx_reorder_array_elem->tail;
        rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        if (head_msdu)
                OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

        idx = (idx_start + 1);
        OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        while (idx != idx_end) {
                rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(
                                &peer->tids_rx_reorder[tid], 1);
                        OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
                                                  rx_reorder_array_elem);
                        tail_msdu = rx_reorder_array_elem->tail;
                }
                rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
                        NULL;
                idx++;
                OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        }
        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                /*
                 * This logic is not quite correct - the last_seq value should
                 * be the sequence number of the final MPDU released rather
                 * than the initial MPDU released.
                 * However, tracking the sequence number of the first MPDU in
                 * the released batch works well enough:
                 * For Peregrine and Rome, the last_seq is checked only for
                 * non-aggregate cases, where only one MPDU at a time is
                 * released.
                 * For Riva, Pronto, and Northstar, the last_seq is checked to
                 * filter out late-arriving rx frames, whose sequence number
                 * will be less than the first MPDU in this release batch.
                 */
                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
        /*
         * If the rx reorder timeout is handled by host SW rather than the
         * target's rx reorder logic, then stop the timer here.
         * (If there are remaining rx holes, then the timer will be
         * restarted.)
         */
        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

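/**
 * ol_rx_reorder_flush() - Flush the rx reorder array slots in the range
 *                         [idx_start, idx_end), either releasing or
 *                         discarding the buffered MPDUs.
 * @vdev: virtual device the frames belong to
 * @peer: peer that sent the frames
 * @tid: traffic identifier of the frames
 * @idx_start: first reorder index to flush
 * @idx_end: index one past the last slot to flush, or 0xffff to flush the
 *           entire array
 * @action: htt_rx_flush_release to deliver the frames to rx_opt_proc,
 *          htt_rx_flush_discard to free them
 *
 * Also removes the TID from the defrag waitlist, resets last_seq if the
 * reorder array is left without holes, and cancels any host rx reorder
 * timeout for the TID.
 */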
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned tid,
                    unsigned idx_start,
                    unsigned idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_pdev_t *pdev;
        unsigned win_sz;
        uint8_t win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;

        pdev = vdev->pdev;
        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* an idx_end value of 0xffff means to flush the entire array */
        if (idx_end == 0xffff) {
                idx_end = idx_start;
                /*
                 * The array is being flushed in entirety because the block
                 * ack window has been shifted to a new position that does
                 * not overlap with the old position.  (Or due to reception
                 * of a DELBA.)
                 * Thus, since the block ack window is essentially being
                 * reset, reset the "next release index".
                 */
                peer->tids_next_rel_idx[tid] =
                        OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz,
                                               win_sz_mask);
        } else {
                peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
        }

        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[idx_start];
                idx_start = (idx_start + 1);
                OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(
                                &peer->tids_rx_reorder[tid], 1);
                        if (head_msdu == NULL) {
                                head_msdu = rx_reorder_array_elem->head;
                                tail_msdu = rx_reorder_array_elem->tail;
                                rx_reorder_array_elem->head = NULL;
                                rx_reorder_array_elem->tail = NULL;
                                continue;
                        }
                        qdf_nbuf_set_next(tail_msdu,
                                          rx_reorder_array_elem->head);
                        tail_msdu = rx_reorder_array_elem->tail;
                        rx_reorder_array_elem->head =
                                rx_reorder_array_elem->tail = NULL;
                }
        } while (idx_start != idx_end);

        ol_rx_defrag_waitlist_remove(peer, tid);

        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                if (action == htt_rx_flush_release) {
                        peer->rx_opt_proc(vdev, peer, tid, head_msdu);
                } else {
                        do {
                                qdf_nbuf_t next;
                                next = qdf_nbuf_next(head_msdu);
                                htt_rx_desc_frame_free(pdev->htt_pdev,
                                                       head_msdu);
                                head_msdu = next;
                        } while (head_msdu);
                }
        }
        /*
         * If the rx reorder array is empty, then reset the last_seq value -
         * it is likely that a BAR or a sequence number shift caused the
         * sequence number to jump, so the old last_seq value is not relevant.
         */
        if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
                peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

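/**
 * ol_rx_reorder_first_hole() - Find where the first contiguous run of
 *                              buffered MPDUs after the initial hole in the
 *                              rx reorder array ends.
 * @peer: peer whose reorder array is scanned
 * @tid: traffic identifier to scan
 * @idx_end: filled with the (exclusive) index of the second hole, i.e. one
 *           past the last present frame that follows the first hole
 *
 * Typically used together with ol_rx_reorder_release() to release frames up
 * to (but not including) the next hole, e.g. when a host rx reorder timeout
 * expires.
 */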
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
                         unsigned tid, unsigned *idx_end)
{
        unsigned win_sz, win_sz_mask;
        unsigned idx_start = 0, tmp_idx = 0;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        tmp_idx++;
        OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        /* bypass the initial hole */
        while (tmp_idx != idx_start &&
               !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /* bypass the present frames following the initial hole */
        while (tmp_idx != idx_start &&
               peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /*
         * idx_end is exclusive rather than inclusive.
         * In other words, it is the index of the first slot of the second
         * hole, rather than the index of the final present frame following
         * the first hole.
         */
        *idx_end = tmp_idx;
}

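/**
 * ol_rx_reorder_peer_cleanup() - Discard all MPDUs buffered in the rx
 *                                reorder arrays of every TID of a peer.
 * @vdev: virtual device the peer belongs to
 * @peer: peer being cleaned up (e.g. being deleted)
 *
 * Flushes each extended TID with htt_rx_flush_discard and clears any host
 * rx reorder timeouts for the peer.
 */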
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
                           struct ol_txrx_peer_t *peer)
{
        int tid;

        for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
                ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
                                    htt_rx_flush_discard);
        }
        OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

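/**
 * ol_rx_addba_handler() - Handle an ADDBA event from the target: set up the
 *                         rx reorder window for the peer/TID.
 * @pdev: data physical device
 * @peer_id: ID of the peer the block-ack agreement is with
 * @tid: traffic identifier covered by the agreement
 * @win_sz: negotiated block-ack window size (at most 64)
 * @start_seq_num: starting sequence number of the window
 * @failed: nonzero if the ADDBA negotiation failed
 *
 * If host-managed ADDBA is enabled, completes the ADDBA handshake with the
 * control path.  On success, allocates a reorder array sized to the window
 * rounded up to a power of two, and initializes the window mask and the
 * next-release index.
 */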
void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
        uint8_t round_pwr2_win_sz;
        unsigned array_size;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        if (pdev->cfg.host_addba) {
                ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
                                          &peer->mac_addr.raw[0], tid, failed);
        }
        if (failed)
                return;

        peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
        rx_reorder = &peer->tids_rx_reorder[tid];

        TXRX_ASSERT2(win_sz <= 64);
        rx_reorder->win_sz = win_sz;
        round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
        array_size =
                round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
        rx_reorder->array = qdf_mem_malloc(array_size);
        TXRX_ASSERT1(rx_reorder->array);
        qdf_mem_set(rx_reorder->array, array_size, 0x0);

        rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
        rx_reorder->num_mpdus = 0;

        peer->tids_next_rel_idx[tid] =
                OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
                                       rx_reorder->win_sz_mask);
}

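/**
 * ol_rx_delba_handler() - Handle a DELBA event from the target: tear down
 *                         the rx reorder window for the peer/TID.
 * @pdev: data physical device
 * @peer_id: ID of the peer whose block-ack agreement was deleted
 * @tid: traffic identifier the agreement covered
 *
 * Invalidates the next-release index, frees the dynamically allocated
 * reorder array, and restores the default single-slot (ARQ window size = 1)
 * reorder state via ol_rx_reorder_init().
 */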
void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        peer->tids_next_rel_idx[tid] = 0xffff;  /* invalid value */
        rx_reorder = &peer->tids_rx_reorder[tid];

        /* check that there really was a block ack agreement */
        TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
        /*
         * Deallocate the old rx reorder array.
         * The call to ol_rx_reorder_init below
         * will reset rx_reorder->array to point to
         * the single-element statically-allocated reorder array
         * used for non block-ack cases.
         */
        if (rx_reorder->array != &rx_reorder->base) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
                           "%s, delete reorder array, tid:%d\n",
                           __func__, tid);
                qdf_mem_free(rx_reorder->array);
        }

        /* set up the TID with default parameters (ARQ window size = 1) */
        ol_rx_reorder_init(rx_reorder, tid);
}

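/**
 * ol_rx_flush_handler() - Handle an rx flush indication from the target for
 *                         a given peer/TID and reorder index range.
 * @pdev: data physical device
 * @peer_id: ID of the peer the flush applies to
 * @tid: traffic identifier to flush
 * @idx_start: first reorder index to flush
 * @idx_end: index one past the last slot to flush
 * @action: whether to release the flushed frames to the rx path or discard
 *          them
 *
 * If the first flushed slot holds a fragmented MPDU, the flush is handed to
 * ol_rx_reorder_flush_frag() instead; otherwise ol_rx_reorder_flush() is
 * invoked and the host rx reorder timeout for the TID is updated.
 */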
void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint16_t idx_start,
                    uint16_t idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        int idx;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer)
                vdev = peer->vdev;
        else
                return;

        OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

        idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                rx_desc =
                        htt_rx_msdu_desc_retrieve(htt_pdev,
                                                  rx_reorder_array_elem->head);
                if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
                        ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
                                                 idx_start);
                        /*
                         * Assuming a flush message is sent separately for
                         * frags and for normal frames
                         */
                        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
                        return;
                }
        }
        ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
        /*
         * If the rx reorder timeout is handled by host SW, see if there are
         * remaining rx holes that require the timer to be restarted.
         */
        OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

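/**
 * ol_rx_pn_ind_handler() - Handle a target indication that packet-number
 *                          (PN) replay checking failed for some MPDUs held
 *                          in the rx reorder array.
 * @pdev: data physical device
 * @peer_id: ID of the peer the indication applies to
 * @tid: traffic identifier of the affected MPDUs
 * @seq_num_start: first reorder slot covered by the indication
 * @seq_num_end: slot one past the last covered slot
 * @pn_ie_cnt: number of entries in the @pn_ie list
 * @pn_ie: list of reorder slots whose MPDUs failed the target's PN check
 *
 * MPDUs in slots listed in @pn_ie are reported via ol_rx_err() and freed;
 * MPDUs in the remaining slots of the range are chained together and
 * delivered to the peer's rx_opt_proc.
 */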
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
                     uint16_t peer_id,
                     uint8_t tid,
                     int seq_num_start,
                     int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        unsigned win_sz_mask;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;
        int seq_num, i = 0;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);

        if (!peer) {
                /*
                 * If we can't find a peer, send this packet to the OCB
                 * interface using the OCB self peer
                 */
                if (!ol_txrx_get_ocb_peer(pdev, &peer))
                        peer = NULL;
        }

        if (peer)
                vdev = peer->vdev;
        else
                return;

        qdf_atomic_set(&peer->fw_pn_check, 1);
        /* TODO: Fragmentation case */
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        seq_num_start &= win_sz_mask;
        seq_num_end &= win_sz_mask;
        seq_num = seq_num_start;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[seq_num];

                if (rx_reorder_array_elem->head) {
                        if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
                                qdf_nbuf_t msdu, next_msdu, mpdu_head,
                                           mpdu_tail;
                                /*
                                 * static variable - implicitly
                                 * zero-initialized, so no explicit
                                 * initialization is needed
                                 */
                                static uint32_t last_pncheck_print_time;

                                int log_level;
                                uint32_t current_time_ms;
                                union htt_rx_pn_t pn = { 0 };
                                int index, pn_len;

                                mpdu_head = msdu = rx_reorder_array_elem->head;
                                mpdu_tail = rx_reorder_array_elem->tail;

                                pn_ie_cnt--;
                                i++;
                                rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
                                                                    msdu);
                                index = htt_rx_msdu_is_wlan_mcast(
                                        pdev->htt_pdev, rx_desc)
                                        ? txrx_sec_mcast
                                        : txrx_sec_ucast;
                                pn_len = pdev->rx_pn[peer->security[index].
                                                     sec_type].len;
                                htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
                                                    pn_len);

                                current_time_ms = qdf_system_ticks_to_msecs(
                                        qdf_system_ticks());
                                if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
                                    (current_time_ms -
                                     last_pncheck_print_time)) {
                                        last_pncheck_print_time =
                                                current_time_ms;
                                        log_level = TXRX_PRINT_LEVEL_WARN;
                                } else {
                                        log_level = TXRX_PRINT_LEVEL_INFO2;
                                }
                                TXRX_PRINT(log_level,
                                           "Tgt PN check failed - TID %d, peer %p "
                                           "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                           " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                           " new seq num = %d\n",
                                           tid, peer,
                                           peer->mac_addr.raw[0],
                                           peer->mac_addr.raw[1],
                                           peer->mac_addr.raw[2],
                                           peer->mac_addr.raw[3],
                                           peer->mac_addr.raw[4],
                                           peer->mac_addr.raw[5], pn.pn128[1],
                                           pn.pn128[0],
                                           pn.pn128[0] & 0xffffffffffffULL,
                                           htt_rx_mpdu_desc_seq_num(htt_pdev,
                                                                    rx_desc));
                                ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
                                          peer->mac_addr.raw, tid,
                                          htt_rx_mpdu_desc_tsf32(htt_pdev,
                                                                 rx_desc),
                                          OL_RX_ERR_PN, mpdu_head, NULL, 0);

                                /* free all MSDUs within this MPDU */
                                do {
                                        next_msdu = qdf_nbuf_next(msdu);
                                        htt_rx_desc_frame_free(htt_pdev, msdu);
                                        if (msdu == mpdu_tail)
                                                break;
                                        else
                                                msdu = next_msdu;
                                } while (1);

                        } else {
                                if (head_msdu == NULL) {
                                        head_msdu = rx_reorder_array_elem->head;
                                        tail_msdu = rx_reorder_array_elem->tail;
                                } else {
                                        qdf_nbuf_set_next(
                                                tail_msdu,
                                                rx_reorder_array_elem->head);
                                        tail_msdu = rx_reorder_array_elem->tail;
                                }
                        }
                        rx_reorder_array_elem->head = NULL;
                        rx_reorder_array_elem->tail = NULL;
                }
                seq_num = (seq_num + 1) & win_sz_mask;
        } while (seq_num != seq_num_end);

        if (head_msdu) {
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
}

#if defined(ENABLE_RX_REORDER_TRACE)

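/**
 * ol_rx_reorder_trace_attach() - Allocate the circular rx reorder trace log.
 * @pdev: data physical device to attach the trace log to
 *
 * The trace log records rx reorder events (TID, reorder index, sequence
 * number, MPDU count) in a power-of-two sized ring buffer; entries are
 * pre-marked with an invalid sequence number (0xffff) so that
 * ol_rx_reorder_trace_display() can tell whether the log has wrapped.
 *
 * Return: A_OK on success, A_NO_MEMORY if the log buffer cannot be allocated.
 */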
A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
        int num_elems;

        num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
        pdev->rx_reorder_trace.idx = 0;
        pdev->rx_reorder_trace.cnt = 0;
        pdev->rx_reorder_trace.mask = num_elems - 1;
        pdev->rx_reorder_trace.data = qdf_mem_malloc(
                sizeof(*pdev->rx_reorder_trace.data) * num_elems);
        if (!pdev->rx_reorder_trace.data)
                return A_NO_MEMORY;

        while (--num_elems >= 0)
                pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

        return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
        qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
                        uint8_t tid,
                        uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
        uint32_t idx = pdev->rx_reorder_trace.idx;

        pdev->rx_reorder_trace.data[idx].tid = tid;
        pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
        pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
        pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
        pdev->rx_reorder_trace.cnt++;
        idx++;
        pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
        static int print_count;
        uint32_t i, start, end;
        uint64_t cnt;
        int elems;

        if (print_count != 0 && just_once)
                return;

        print_count++;

        end = pdev->rx_reorder_trace.idx;
        if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
                /* trace log has not yet wrapped around - start at the top */
                start = 0;
                cnt = 0;
        } else {
                start = end;
                cnt = pdev->rx_reorder_trace.cnt -
                        (pdev->rx_reorder_trace.mask + 1);
        }
        elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
        if (limit > 0 && elems > limit) {
                int delta;
                delta = elems - limit;
                start += delta;
                start &= pdev->rx_reorder_trace.mask;
                cnt += delta;
        }

        i = start;
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "       log    array  seq");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "  count  idx  tid  idx  num  (LSBs)");
        do {
                uint16_t seq_num, reorder_idx;
                seq_num = pdev->rx_reorder_trace.data[i].seq_num;
                reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
                if (seq_num < (1 << 14)) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d  %3d  %4d  %4d  (%d)",
                                  cnt, i, pdev->rx_reorder_trace.data[i].tid,
                                  reorder_idx, seq_num, seq_num & 63);
                } else {
                        int err = TXRX_SEQ_NUM_ERR(seq_num);
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d  err %d (%d MPDUs)",
                                  cnt, i, err,
                                  pdev->rx_reorder_trace.data[i].num_mpdus);
                }
                cnt++;
                i++;
                i &= pdev->rx_reorder_trace.mask;
        } while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */