/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_mem.h>		/* qdf_mem_malloc */

#include <ieee80211.h>		/* IEEE80211_SEQ_MAX */

/* external interfaces */
#include <ol_txrx_api.h>	/* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>	/* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>	/* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>	/* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>	/* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>	/* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

static char g_log2ceil[] = {
	1,			/* 0 -> 1 */
	1,			/* 1 -> 1 */
	2,			/* 2 -> 2 */
	4, 4,			/* 3-4 -> 4 */
	8, 8, 8, 8,		/* 5-8 -> 8 */
	16, 16, 16, 16, 16, 16, 16, 16,	/* 9-16 -> 16 */
	32, 32, 32, 32, 32, 32, 32, 32,
	32, 32, 32, 32, 32, 32, 32, 32,	/* 17-32 -> 32 */
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,	/* 33-64 -> 64 */
};
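
/*
 * For example:
 *     OL_RX_REORDER_ROUND_PWR2(1)  == 1   (mask 0x00)
 *     OL_RX_REORDER_ROUND_PWR2(26) == 32  (mask 0x1f)
 *     OL_RX_REORDER_ROUND_PWR2(64) == 64  (mask 0x3f)
 * Rounding the window up to a power of two lets (idx & win_sz_mask)
 * substitute for a modulo operation. Callers must keep win_sz <= 64
 * (see the TXRX_ASSERT2 in ol_rx_addba_handler); larger values would
 * index past the end of the table.
 */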

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0	/* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)	/* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)	/* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do { \
		if (tail_msdu) { \
			qdf_nbuf_set_next(tail_msdu, \
					  rx_reorder_array_elem->head); \
		} \
	} while (0)
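
/*
 * NB: OL_RX_REORDER_LIST_APPEND only links the current chain's tail to
 * the next slot's head; the caller is responsible for advancing its
 * tail_msdu pointer afterwards, as ol_rx_reorder_release does.
 */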

/* functions called by txrx components */

void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}
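
/*
 * With the defaults above (win_sz = 1, win_sz_mask = 0), every reorder
 * index masks down to slot 0, so the single statically allocated "base"
 * element acts as a pass-through, no-reordering window until an ADDBA
 * installs a real array (see ol_rx_addba_handler).
 */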

static enum htt_rx_status
ol_rx_reorder_seq_num_check(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, unsigned int seq_num)
{
	unsigned int seq_num_delta;

	/*
	 * Don't check the new seq_num against last_seq if last_seq is not
	 * valid.
	 */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with the retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);	/* account for wraparound */
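	/*
	 * Worked example of the wraparound arithmetic above (illustrative
	 * numbers, assuming IEEE80211_SEQ_MAX == 4096):
	 *   last_seq = 100,  seq_num = 101 -> delta = 0    -> in order
	 *   last_seq = 100,  seq_num = 100 -> delta = 4095 -> replay
	 *   last_seq = 100,  seq_num = 90  -> delta = 4085 -> replay
	 *   last_seq = 4090, seq_num = 5   -> delta = 10   -> in order
	 */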

	if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
		return htt_rx_status_err_replay;
		/* or maybe htt_rx_status_err_dup */
	}
	return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Do duplicate detection for multicast frames,
 *			   and duplicate detection plus an out-of-order
 *			   check for unicast frames.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *	1) For multicast frames -- does duplicate detection.
 *	   A frame is considered a duplicate, and dropped, if it has the
 *	   same sequence number as the previously received frame and the
 *	   retry bit is set in the second frame.
 *	   A frame which is older than the last sequence number received
 *	   is not considered a duplicate but out-of-order. This function
 *	   does not perform an out-of-order check for multicast frames,
 *	   which is in keeping with section 9.3.2.10 of the 802.11-2012
 *	   spec.
 *	2) For unicast frames -- does duplicate detection and an
 *	   out-of-order check, but only for non-aggregation TIDs.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *	   htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid,
		    void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = false;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

	/*
	 * For mcast packets, we only do the dup-detection, not the
	 * re-order check.
	 */
	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* Invalid packet TID, expected only for HL - pass it on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, we define frames to be duplicates if they
		 * arrive "ONLY" in succession with the same sequence number
		 * and the last one has the retry bit set. An older frame is
		 * considered out-of-order rather than duplicate, and per
		 * discussions & spec, out-of-order multicast frames are not
		 * dropped. Hence a "seq_num <= last_seq_num" check is not
		 * necessary.
		 */
		if (qdf_unlikely(retry &&
				 (seq_num ==
				  peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		}

		/*
		 * This is a multicast packet likely to be passed on.
		 * Set the mcast last seq number here.
		 * This is fairly accurate since:
		 * a) f/w sends multicast as separate PPDU/HTT messages
		 * b) mcast packets are not aggregated, hence single
		 * c) as a result of b), the flush / release bit is always
		 *    set on mcast packets, so they are likely to be
		 *    released immediately
		 */
		peer->tids_mcast_last_seq[pkt_tid] = seq_num;
		return htt_rx_status_ok;
	}

	return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
}

void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx, qdf_nbuf_t head_msdu,
		    qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}
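
/*
 * Illustrative example (hypothetical values): with a 64-entry reorder
 * array (win_sz_mask = 0x3f), a reorder index of 70 from the target is
 * stored in slot 70 & 0x3f = 6. MPDUs landing in an occupied slot are
 * chained onto the tail of that slot's MSDU list.
 */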

void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned int tid, unsigned int idx_start,
		      unsigned int idx_end)
{
	unsigned int idx;
	unsigned int win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	if (head_msdu)
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

	idx = idx_start + 1;
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
			NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather
		 * than the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be
	 * restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx_start,
		    unsigned int idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned int win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in its entirety because the
		 * block ack window has been shifted to a new position that
		 * does not overlap with the old position. (Or due to
		 * reception of a DELBA.)
		 * Thus, since the block ack window is essentially being
		 * reset, reset the "next release index".
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz,
					       win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (head_msdu == NULL) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;	/* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned int tid, unsigned int *idx_end)
{
	unsigned int win_sz, win_sz_mask;
	unsigned int idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}
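
/*
 * Worked example (hypothetical occupancy, 8-slot window, idx_start = 0):
 *
 *   slot:  0  1  2  3  4  5  6  7
 *   head:  -  -  -  X  X  -  -  -      (X = MPDU present, - = hole)
 *
 * The first loop skips the initial hole (slots 1-2), the second loop
 * skips the present run (slots 3-4), and *idx_end is returned as 5:
 * the first slot of the second hole, i.e. an exclusive bound.
 */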

void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
	uint8_t round_pwr2_win_sz;
	unsigned int array_size;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	if (pdev->cfg.host_addba) {
		ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
					  &peer->mac_addr.raw[0], tid, failed);
	}
	if (failed)
		return;

	peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;	/* invalid */
	rx_reorder = &peer->tids_rx_reorder[tid];

	TXRX_ASSERT2(win_sz <= 64);
	rx_reorder->win_sz = win_sz;
	round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
	array_size =
		round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
	rx_reorder->array = qdf_mem_malloc(array_size);
	TXRX_ASSERT1(rx_reorder->array);

	rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
	rx_reorder->num_mpdus = 0;

	peer->tids_next_rel_idx[tid] =
		OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
				       rx_reorder->win_sz_mask);
}
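
/*
 * Sizing sketch for the allocation above (hypothetical numbers): an ADDBA
 * with win_sz = 40 rounds up to round_pwr2_win_sz = 64, so the array holds
 * 64 entries of struct ol_rx_reorder_array_elem_t and win_sz_mask becomes
 * 0x3f, while rx_reorder->win_sz keeps the exact window size (40).
 */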

void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	peer->tids_next_rel_idx[tid] = 0xffff;	/* invalid value */
	rx_reorder = &peer->tids_rx_reorder[tid];

	/* check that there really was a block ack agreement */
	TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
	/*
	 * Deallocate the old rx reorder array.
	 * The call to ol_rx_reorder_init below
	 * will reset rx_reorder->array to point to
	 * the single-element statically-allocated reorder array
	 * used for non block-ack cases.
	 */
	if (rx_reorder->array != &rx_reorder->base) {
		ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
			    __func__, tid);
		qdf_mem_free(rx_reorder->array);
	}

	/* set up the TID with default parameters (ARQ window size = 1) */
	ol_rx_reorder_init(rx_reorder, tid);
}

void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming the flush message is sent separately for
			 * frags and for normal frames.
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}
	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     int seq_num_start,
		     int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned int win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int seq_num, i = 0;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: Fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				/* zero-initialized, since it is static */
				static uint32_t last_pncheck_print_time;
				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					ol_txrx_warn(
						"Tgt PN check failed - TID %d, peer %p "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"    PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"    new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				} else {
					ol_txrx_dbg(
						"Tgt PN check failed - TID %d, peer %p "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"    PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"    new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				}
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					msdu = next_msdu;
				} while (1);

			} else {
				if (head_msdu == NULL) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  " log array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  " count idx tid idx num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " %6lld %4d %3d %4d %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " %6lld %4d err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */