/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_mem.h>		/* qdf_mem_malloc */

/* external interfaces */
#include <ol_txrx_api.h>	/* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>	/* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>	/* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>	/* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>	/* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>	/* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

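/*
 * OL_RX_REORDER_ROUND_PWR2() rounds a block-ack window size (0..64) up
 * to the next power of two via the g_log2ceil lookup table below, e.g.
 * OL_RX_REORDER_ROUND_PWR2(5) expands to g_log2ceil[5] == 8.
 * A power-of-two reorder array length is what allows indices to be
 * wrapped with a cheap bitwise AND against win_sz_mask (see
 * OL_RX_REORDER_IDX_WRAP below) rather than a modulo.
 */
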
/*=== global variables ===*/

static char g_log2ceil[] = {
	1,			/* 0 -> 1 */
	1,			/* 1 -> 1 */
	2,			/* 2 -> 2 */
	4, 4,			/* 3-4 -> 4 */
	8, 8, 8, 8,		/* 5-8 -> 8 */
	16, 16, 16, 16, 16, 16, 16, 16,	/* 9-16 -> 16 */
	32, 32, 32, 32, 32, 32, 32, 32,
	32, 32, 32, 32, 32, 32, 32, 32,	/* 17-32 -> 32 */
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,	/* 33-64 -> 64 */
};

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr) /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr) /* n/a */
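
/*
 * The definitions above are the trivial variants of these hooks:
 * reorder indices wrap via the win_sz_mask AND, the initial release
 * index is simply 0, and no MPDU counting or hole accounting is done.
 * Presumably a build that tracks outstanding MPDUs (num_mpdus) or
 * supports a self-selected release start index would supply
 * non-trivial definitions here.
 */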

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do {								\
		if (tail_msdu) {					\
			qdf_nbuf_set_next(tail_msdu,			\
					  rx_reorder_array_elem->head);	\
		}							\
	} while (0)

/* functions called by txrx components */

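/**
 * ol_rx_reorder_init() - reset a TID's rx reorder state to the
 *	no-block-ack default: a single-slot window (win_sz = 1,
 *	win_sz_mask = 0) backed by the statically-allocated base element
 * @rx_reorder: rx reorder state for one peer-TID
 * @tid: which TID the state belongs to
 */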
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

static enum htt_rx_status
ol_rx_reorder_seq_num_check(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, unsigned int seq_num)
{
	unsigned int seq_num_delta;

	/*
	 * Don't check the new seq_num against last_seq
	 * if last_seq is not valid.
	 */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);	/* account for wraparound */
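	/*
	 * Worked example (assuming IEEE80211_SEQ_MAX is 4096, so the
	 * wraparound mask is 4095): with last_seq = 4094, a new
	 * seq_num = 2 gives (2 - 1 - 4094) & 4095 = 3, a small forward
	 * step that is accepted; a stale seq_num = 4090 gives
	 * (4090 - 1 - 4094) & 4095 = 4091 > 2048 and is rejected below.
	 */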
123
124 if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
125 return htt_rx_status_err_replay;
126 /* or maybe htt_rx_status_err_dup */
127 }
128 return htt_rx_status_ok;
129}
130
/**
 * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
 *	duplicate detection & check for out-of-order packets for unicast
 *	packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *	1) For Multicast Frames -- does duplicate detection
 *		A frame is considered a duplicate & dropped if it has a
 *		sequence number which is received twice in succession,
 *		with the retry bit set in the second case.
 *		A frame which is older than the last sequence number
 *		received is not considered a duplicate but out-of-order.
 *		This function does not perform an out-of-order check for
 *		multicast frames, which is in keeping with the 802.11-2012
 *		spec, section 9.3.2.10.
 *	2) For Unicast Frames -- does duplicate detection & out-of-order
 *		check, only for non-aggregation TIDs.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *	htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid, void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = false;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

	/* For mcast packets, do only the dup-detection, not the reorder check */
	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {
		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* invalid packet TID, expected only for HL; pass the packet on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, we define frames to be duplicates if they
		 * arrive "ONLY" in succession with the same sequence number
		 * and the last one has the retry bit set. For an older frame,
		 * we consider that an out-of-order frame, and hence do not
		 * perform the dup-detection or out-of-order check for
		 * multicast frames as per discussions & spec.
		 * Hence the "seq_num <= last_seq_num" check is not necessary.
		 */
		if (qdf_unlikely(retry &&
				 (seq_num ==
				  peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		}

		/*
		 * This is a multicast packet likely to be passed on,
		 * so set the mcast last seq number here.
		 * This is fairly accurate since:
		 * a) f/w sends multicast as separate PPDU/HTT messages
		 * b) mcast packets are not aggregated & hence single
		 * c) as a result of b), the flush / release bit is always
		 *    set on mcast packets, so they are likely to be
		 *    released immediately.
		 */
		peer->tids_mcast_last_seq[pkt_tid] = seq_num;
		return htt_rx_status_ok;
	} else {
		return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
	}
}

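/**
 * ol_rx_reorder_store() - buffer an MSDU chain in the rx reorder array
 * @pdev: pdev that received the MPDU
 * @peer: peer the MPDU was received from
 * @tid: TID within which the MPDU was received
 * @idx: reorder index (sequence number based) of the MPDU
 * @head_msdu: first MSDU of the MPDU
 * @tail_msdu: last MSDU of the MPDU
 *
 * The MSDU chain is appended to the tail of the slot's existing list,
 * so several MPDUs that map to the same (masked) index accumulate in
 * arrival order until released or flushed.
 */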
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx, qdf_nbuf_t head_msdu,
		    qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}

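/**
 * ol_rx_reorder_release() - release in-order MPDUs from the reorder array
 * @vdev: vdev the MSDUs belong to
 * @peer: peer the MSDUs were received from
 * @tid: TID within which the MSDUs were received
 * @idx_start: first reorder slot to release
 * @idx_end: one past the last reorder slot to release (exclusive)
 *
 * Walks the reorder slots in [idx_start, idx_end), chains the buffered
 * MSDU lists together, records the last_seq of the released batch, and
 * hands the NULL-terminated MSDU list to the peer's rx_opt_proc.
 */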
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned int tid, unsigned int idx_start,
		      unsigned int idx_end)
{
	unsigned int idx;
	unsigned int win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	if (head_msdu)
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

	idx = (idx_start + 1);
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
			NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather
		 * than the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

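/**
 * ol_rx_reorder_flush() - flush a range of the rx reorder array
 * @vdev: vdev the MSDUs belong to
 * @peer: peer the MSDUs were received from
 * @tid: TID within which the MSDUs were received
 * @idx_start: first reorder slot to flush
 * @idx_end: one past the last slot to flush, or 0xffff to flush the
 *	whole array
 * @action: whether to deliver the flushed MSDUs up the stack
 *	(htt_rx_flush_release) or free them (htt_rx_flush_discard)
 */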
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx_start,
		    unsigned int idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned int win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in its entirety because the
		 * block ack window has been shifted to a new position that
		 * does not overlap with the old position. (Or due to
		 * reception of a DELBA.)
		 * Thus, since the block ack window is essentially being
		 * reset, reset the "next release index" too.
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (head_msdu == NULL) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;	/* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

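/**
 * ol_rx_reorder_first_hole() - find where the first hole in the rx
 *	reorder array ends
 * @peer: peer the reorder array belongs to
 * @tid: TID of the reorder array
 * @idx_end: filled in with the (exclusive) index one past the run of
 *	buffered frames that follows the initial hole
 */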
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned int tid, unsigned int *idx_end)
{
	unsigned int win_sz, win_sz_mask;
	unsigned int idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}

#ifdef HL_RX_AGGREGATION_HOLE_DETECTION

/**
 * ol_rx_reorder_detect_hole() - detect a hole in the rx block-ack window
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: index of the first MPDU about to be released
 *
 * If the release does not start at the expected next-release index,
 * the size of the gap is reported via ol_rx_aggregation_hole().
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	uint32_t win_sz_mask, next_rel_idx, hole_size;

	if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
		return;

	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	/* return directly if block-ack is not enabled */
	if (win_sz_mask == 0)
		return;

	idx_start &= win_sz_mask;
	next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;

	if (idx_start != next_rel_idx) {
		hole_size = ((int)idx_start - (int)next_rel_idx) & win_sz_mask;

		ol_rx_aggregation_hole(hole_size);
	}
}

#else

/**
 * ol_rx_reorder_detect_hole() - no-op stub used when hole detection is
 *	compiled out
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	/* no-op */
}

#endif

void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

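/**
 * ol_rx_addba_handler() - set up the rx reorder state for a new
 *	block-ack agreement
 * @pdev: data pdev handle
 * @peer_id: which peer the ADDBA event is for
 * @tid: which TID the block-ack agreement covers
 * @win_sz: negotiated block-ack window size (at most 64)
 * @start_seq_num: initial sequence number of the window
 * @failed: whether the ADDBA negotiation failed
 *
 * Allocates a reorder array whose length is win_sz rounded up to a
 * power of two, so that slots can be addressed with win_sz_mask.
 */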
void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
	uint8_t round_pwr2_win_sz;
	unsigned int array_size;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
		WARN_ON(1);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	if (pdev->cfg.host_addba) {
		ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
					  &peer->mac_addr.raw[0], tid, failed);
	}
	if (failed)
		return;

	peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;	/* invalid */
	rx_reorder = &peer->tids_rx_reorder[tid];

	TXRX_ASSERT2(win_sz <= 64);
	rx_reorder->win_sz = win_sz;
	round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
	array_size =
		round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
	rx_reorder->array = qdf_mem_malloc(array_size);
	TXRX_ASSERT1(rx_reorder->array);

	rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
	rx_reorder->num_mpdus = 0;

	peer->tids_next_rel_idx[tid] =
		OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
				       rx_reorder->win_sz_mask);
}

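/**
 * ol_rx_delba_handler() - tear down the rx reorder state when a
 *	block-ack agreement is deleted
 * @pdev: data pdev handle
 * @peer_id: which peer the DELBA event is for
 * @tid: which TID the deleted agreement covered
 *
 * Frees the dynamically-allocated reorder array and reverts the TID to
 * the default single-slot (no block-ack) configuration.
 */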
void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
		WARN_ON(1);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
	rx_reorder = &peer->tids_rx_reorder[tid];

	/* check that there really was a block ack agreement */
	TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
	/*
	 * Deallocate the old rx reorder array.
	 * The call to ol_rx_reorder_init below
	 * will reset rx_reorder->array to point to
	 * the single-element statically-allocated reorder array
	 * used for non block-ack cases.
	 */
	if (rx_reorder->array != &rx_reorder->base) {
		ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
			    __func__, tid);
		qdf_mem_free(rx_reorder->array);
	}

	/* set up the TID with default parameters (ARQ window size = 1) */
	ol_rx_reorder_init(rx_reorder, tid);
}

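/**
 * ol_rx_flush_handler() - handle an rx flush indication from the target
 * @pdev: data pdev handle
 * @peer_id: which peer the flush applies to
 * @tid: which TID the flush applies to
 * @idx_start: first reorder slot to flush
 * @idx_end: one past the last reorder slot to flush
 * @action: release the frames to the rx stack, or discard them
 *
 * Fragmented MPDUs are diverted to ol_rx_reorder_flush_frag(), on the
 * assumption that the target sends separate flush messages for
 * fragments and for normal frames.
 */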
void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming flush messages are sent separately for
			 * frags and for normal frames.
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}

	if (action == htt_rx_flush_release)
		ol_rx_reorder_detect_hole(peer, tid, idx_start);

	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

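/**
 * ol_rx_pn_ind_handler() - handle a target PN-check-failure indication
 * @pdev: data pdev handle
 * @peer_id: which peer the indication is for
 * @tid: which TID the indication covers
 * @seq_num_start: first sequence number in the checked range
 * @seq_num_end: one past the last sequence number in the checked range
 * @pn_ie_cnt: number of entries in the pn_ie list
 * @pn_ie: list of (window-masked) sequence numbers that failed the
 *	target's PN check
 *
 * MPDUs named in pn_ie are logged, reported via ol_rx_err(), and freed;
 * the remaining MPDUs in the range are chained together and released to
 * the peer's rx_opt_proc.
 */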
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     uint16_t seq_num_start,
		     uint16_t seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned int win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	uint16_t seq_num;
	int i = 0;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
		WARN_ON(1);
		return;
	}
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				/* static: zero-initialized by C */
				static uint32_t last_pncheck_print_time;
				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					ol_txrx_warn(
						"Tgt PN check failed - TID %d, peer %pK "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"   new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				} else {
					ol_txrx_dbg(
						"Tgt PN check failed - TID %d, peer %pK "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"   new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				}
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					msdu = next_msdu;
				} while (1);

			} else {
				if (head_msdu == NULL) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "           log       array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count   idx  tid   idx  num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d  %3d  %4d  %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d    err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */