/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc */

/* external interfaces */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>    /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>      /* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

static char g_log2ceil[] = {
        1,  /* 0 -> 1 */
        1,  /* 1 -> 1 */
        2,  /* 2 -> 2 */
        4, 4,  /* 3-4 -> 4 */
        8, 8, 8, 8,  /* 5-8 -> 8 */
        16, 16, 16, 16, 16, 16, 16, 16,  /* 9-16 -> 16 */
        32, 32, 32, 32, 32, 32, 32, 32,
        32, 32, 32, 32, 32, 32, 32, 32,  /* 17-32 -> 32 */
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,  /* 33-64 -> 64 */
};
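
/*
 * Example (illustrative): despite its name, g_log2ceil maps a window size
 * to the next power of two rather than to a logarithm.  For instance,
 * OL_RX_REORDER_ROUND_PWR2(5) is g_log2ceil[5] = 8, so an ADDBA window of
 * 5 MPDUs gets an 8-slot reorder array with win_sz_mask = 8 - 1 = 7.
 */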

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr) /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr) /* n/a */
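
/*
 * Example (illustrative, assumed window size): since the reorder array
 * length is always a power of two, OL_RX_REORDER_IDX_WRAP can wrap with a
 * mask instead of a modulo.  With win_sz = 8 and win_sz_mask = 7, an idx
 * of 8 wraps to 0 and an idx of 9 wraps to 1, i.e. idx & 7 == idx % 8.
 */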

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
        do { \
                if (tail_msdu) { \
                        qdf_nbuf_set_next(tail_msdu, \
                                          rx_reorder_array_elem->head); \
                } \
        } while (0)
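
/*
 * Example (hypothetical buffers): if the list collected so far is a->b
 * (tail_msdu = b) and the array element holds the chain c->d, the macro
 * links b->c; the caller then advances tail_msdu to d.  Note that the
 * head_msdu argument is unused in this flavor of the macro.
 */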

/* functions called by txrx components */

void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
        rx_reorder->win_sz = 1;
        rx_reorder->win_sz_mask = 0;
        rx_reorder->array = &rx_reorder->base;
        rx_reorder->base.head = rx_reorder->base.tail = NULL;
        rx_reorder->tid = tid;
        rx_reorder->defrag_timeout_ms = 0;

        rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
        rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}
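
/*
 * Descriptive note: until an ADDBA establishes a real block-ack window,
 * each TID runs with this degenerate one-entry window (win_sz_mask = 0),
 * so every reorder index maps to the built-in "base" slot and frames are
 * passed through without buffering.
 */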

static enum htt_rx_status
ol_rx_reorder_seq_num_check(
        struct ol_txrx_pdev_t *pdev,
        struct ol_txrx_peer_t *peer,
        unsigned int tid, unsigned int seq_num)
{
        unsigned int seq_num_delta;

        /*
         * Don't check the new seq_num against last_seq
         * if last_seq is not valid.
         */
        if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
                return htt_rx_status_ok;

        /*
         * For duplicate detection, it might be helpful to also check
         * whether the retry bit is set or not - a strict duplicate packet
         * should be the one with the retry bit set.
         * However, since many implementations do not set the retry bit,
         * and since this same function is also used for filtering out
         * late-arriving frames (frames that arrive after their rx reorder
         * timeout has expired) which are not retries, don't bother checking
         * the retry bit for now.
         */
        /* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
        seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
                (IEEE80211_SEQ_MAX - 1);        /* account for wraparound */

        if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
                return htt_rx_status_err_replay;
                /* or maybe htt_rx_status_err_dup */
        }
        return htt_rx_status_ok;
}
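
/*
 * Worked example (illustrative numbers, assuming IEEE80211_SEQ_MAX is
 * 4096): the delta is masked with 4095, i.e. computed modulo 4096.  With
 * last_seq = 4090 and a new seq_num of 5, (5 - 1 - 4090) & 4095 = 10,
 * which is accepted despite the wraparound.  With last_seq = 100 and a
 * new seq_num of 90, (90 - 1 - 100) & 4095 = 4085 > 2048, so the frame
 * is rejected as a replay / late arrival.
 */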

/**
 * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
 *                         duplicate detection & out-of-order check for
 *                         unicast packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *      1) For Multicast Frames -- does duplicate detection
 *         A frame is considered a duplicate & dropped if it has a sequence
 *         number which is received twice in succession, with the retry bit
 *         set in the second case.
 *         A frame which is older than the last sequence number received is
 *         not considered a duplicate but out-of-order.  This function does
 *         not perform an out-of-order check for multicast frames, which is
 *         in keeping with section 9.3.2.10 of the 802.11-2012 spec.
 *      2) For Unicast Frames -- does duplicate detection & out-of-order
 *         check only for non-aggregation tids.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *         htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    uint8_t tid,
                    void *rx_mpdu_desc)
{
        uint16_t pkt_tid = 0xffff;
        uint16_t seq_num = IEEE80211_SEQ_MAX;
        bool retry = 0;

        seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

        /* For mcast packets, do only the dup-detection, not the re-order
         * check.
         */

        if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

                pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

                /* Invalid packet TID, expected only for HL */
                /* Pass the packet on */
                if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
                        return htt_rx_status_ok;

                retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

                /*
                 * At this point, we define frames to be duplicate if they
                 * arrive "ONLY" in succession with the same sequence number
                 * and the last one has the retry bit set.  An older frame is
                 * considered out-of-order rather than duplicate, and hence
                 * we do not perform the dup-detection or out-of-order check
                 * for multicast frames as per discussions & spec.
                 * Hence the "seq_num <= last_seq_num" check is not necessary.
                 */
                if (qdf_unlikely(retry &&
                                 (seq_num ==
                                  peer->tids_mcast_last_seq[pkt_tid]))) {
                        /* drop mcast */
                        TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
                        return htt_rx_status_err_replay;
                }

                /*
                 * This is a multicast packet likely to be passed on...
                 * Set the mcast last seq number here.
                 * This is fairly accurate since:
                 * a) f/w sends multicast as separate PPDU/HTT messages
                 * b) mcast packets are not aggregated & hence single
                 * c) as a result of b), the flush / release bit is always
                 *    set on mcast packets, so they are likely to be
                 *    released immediately
                 */
                peer->tids_mcast_last_seq[pkt_tid] = seq_num;
                return htt_rx_status_ok;
        } else
                return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
}
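
/*
 * Illustrative scenario (assumed values): an mcast MPDU with seq_num 42
 * records 42 in tids_mcast_last_seq.  If seq_num 42 arrives again with
 * the retry bit set, it is counted in msdu_mc_dup_drop and dropped as a
 * replay; if it arrives again without the retry bit, it is passed up.
 */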

void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx, qdf_nbuf_t head_msdu,
                    qdf_nbuf_t tail_msdu)
{
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

        idx &= peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
        } else {
                rx_reorder_array_elem->head = head_msdu;
                OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
        }
        rx_reorder_array_elem->tail = tail_msdu;
}
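
/*
 * Usage sketch (illustrative only, deliberately compiled out): parking an
 * out-of-order MPDU and later releasing part of the window.  The pdev,
 * vdev, peer, tid, msdu_head and msdu_tail names are assumed to exist in
 * the caller; the 8-slot window (mask 7) is hypothetical.
 */
#if 0
/* an MPDU with block-ack index 5 arrived early: park it in slot 5 (5 & 7) */
ol_rx_reorder_store(pdev, peer, tid, 5, msdu_head, msdu_tail);
/* once the hole fills, release slots 0..5 (idx_end is exclusive) */
ol_rx_reorder_release(vdev, peer, tid, 0, 6);
#endif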

void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
                      struct ol_txrx_peer_t *peer,
                      unsigned int tid, unsigned int idx_start,
                      unsigned int idx_end)
{
        unsigned int idx;
        unsigned int win_sz, win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu;
        qdf_nbuf_t tail_msdu;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* may get reset below */
        peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

        head_msdu = rx_reorder_array_elem->head;
        tail_msdu = rx_reorder_array_elem->tail;
        rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        if (head_msdu)
                OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

        idx = (idx_start + 1);
        OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        while (idx != idx_end) {
                rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
                                                  rx_reorder_array_elem);
                        tail_msdu = rx_reorder_array_elem->tail;
                }
                rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
                        NULL;
                idx++;
                OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        }
        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                /*
                 * This logic is not quite correct - the last_seq value should
                 * be the sequence number of the final MPDU released rather
                 * than the initial MPDU released.
                 * However, tracking the sequence number of the first MPDU in
                 * the released batch works well enough:
                 * For Peregrine and Rome, the last_seq is checked only for
                 * non-aggregate cases, where only one MPDU at a time is
                 * released.
                 * For Riva, Pronto, and Northstar, the last_seq is checked to
                 * filter out late-arriving rx frames, whose sequence number
                 * will be less than the first MPDU in this release batch.
                 */
                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
        /*
         * If the rx reorder timeout is handled by host SW rather than the
         * target's rx reorder logic, then stop the timer here.
         * (If there are remaining rx holes, then the timer will be restarted.)
         */
        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}
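
/*
 * Illustrative walk (assumed 8-slot window, mask 7): a release with
 * idx_start = 6 and idx_end = 10 masks them to 6 and 2, then chains the
 * contents of slots 6, 7, 0, 1 into a single MSDU list before handing it
 * to rx_opt_proc.  idx_end is exclusive, and idx_start == idx_end after
 * masking releases the entire window.
 */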

void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx_start,
                    unsigned int idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_pdev_t *pdev;
        unsigned int win_sz;
        uint8_t win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;

        pdev = vdev->pdev;
        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* an idx_end value of 0xffff means to flush the entire array */
        if (idx_end == 0xffff) {
                idx_end = idx_start;
                /*
                 * The array is being flushed in entirety because the block
                 * ack window has been shifted to a new position that does not
                 * overlap with the old position.  (Or due to reception of a
                 * DELBA.)
                 * Thus, since the block ack window is essentially being reset,
                 * reset the "next release index".
                 */
                peer->tids_next_rel_idx[tid] =
                        OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
        } else {
                peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;
        }

        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[idx_start];
                idx_start = (idx_start + 1);
                OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        if (head_msdu == NULL) {
                                head_msdu = rx_reorder_array_elem->head;
                                tail_msdu = rx_reorder_array_elem->tail;
                                rx_reorder_array_elem->head = NULL;
                                rx_reorder_array_elem->tail = NULL;
                                continue;
                        }
                        qdf_nbuf_set_next(tail_msdu,
                                          rx_reorder_array_elem->head);
                        tail_msdu = rx_reorder_array_elem->tail;
                        rx_reorder_array_elem->head =
                                rx_reorder_array_elem->tail = NULL;
                }
        } while (idx_start != idx_end);

        ol_rx_defrag_waitlist_remove(peer, tid);

        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                if (action == htt_rx_flush_release) {
                        peer->rx_opt_proc(vdev, peer, tid, head_msdu);
                } else {
                        do {
                                qdf_nbuf_t next;

                                next = qdf_nbuf_next(head_msdu);
                                htt_rx_desc_frame_free(pdev->htt_pdev,
                                                       head_msdu);
                                head_msdu = next;
                        } while (head_msdu);
                }
        }
        /*
         * If the rx reorder array is empty, then reset the last_seq value -
         * it is likely that a BAR or a sequence number shift caused the
         * sequence number to jump, so the old last_seq value is not relevant.
         */
        if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
                peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}
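
/*
 * Descriptive note: the two flush actions differ only in the final step -
 * htt_rx_flush_release hands the collected chain to rx_opt_proc just as
 * ol_rx_reorder_release() does, while htt_rx_flush_discard walks the
 * chain and frees each netbuf back to the HTT rx descriptor pool (e.g.
 * on DELBA or peer cleanup).
 */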

void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
                         unsigned int tid, unsigned int *idx_end)
{
        unsigned int win_sz, win_sz_mask;
        unsigned int idx_start = 0, tmp_idx = 0;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        tmp_idx++;
        OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        /* bypass the initial hole */
        while (tmp_idx != idx_start &&
               !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /* bypass the present frames following the initial hole */
        while (tmp_idx != idx_start &&
               peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /*
         * idx_end is exclusive rather than inclusive.
         * In other words, it is the index of the first slot of the second
         * hole, rather than the index of the final present frame following
         * the first hole.
         */
        *idx_end = tmp_idx;
}
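
/*
 * Worked example (hypothetical window contents): in an 8-slot window with
 * idx_start = 0 and slots 1..7 holding [-, F, F, -, F, -, -] (F = frame
 * present, - = hole), the scan starts at slot 1, skips the initial hole
 * (slot 1), walks the present run (slots 2-3), and stops at slot 4, so
 * *idx_end = 4: the first slot of the second hole.
 */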

#ifdef HL_RX_AGGREGATION_HOLE_DETECTION

/**
 * ol_rx_reorder_detect_hole - detect a hole in the rx reorder window and
 *                             report its size
 * @peer: peer handle
 * @tid: traffic type id
 * @idx_start: reorder index at which the current release starts
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
                                      uint32_t tid,
                                      uint32_t idx_start)
{
        uint32_t win_sz_mask, next_rel_idx, hole_size;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                return;
        }

        if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
                return;

        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        /* return directly if block ack is not enabled */
        if (win_sz_mask == 0)
                return;

        idx_start &= win_sz_mask;
        next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;

        if (idx_start != next_rel_idx) {
                hole_size = ((int)idx_start - (int)next_rel_idx) &
                        win_sz_mask;

                ol_rx_aggregation_hole(hole_size);
        }
}
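
/*
 * Worked example (assumed values): with win_sz_mask = 63, if the next
 * expected release index is 60 but the released range starts at index 3,
 * the missing stretch is (3 - 60) & 63 = 7 MPDUs, which is reported via
 * ol_rx_aggregation_hole().
 */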

#else

/**
 * ol_rx_reorder_detect_hole - detect a hole in the rx reorder window and
 *                             report its size
 * @peer: peer handle
 * @tid: traffic type id
 * @idx_start: reorder index at which the current release starts
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
                                      uint32_t tid,
                                      uint32_t idx_start)
{
        /* no-op */
}

#endif

void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
                           struct ol_txrx_peer_t *peer)
{
        int tid;

        for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
                ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
                                    htt_rx_flush_discard);
        }
        OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
        uint8_t round_pwr2_win_sz;
        unsigned int array_size;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        if (pdev->cfg.host_addba) {
                ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
                                          &peer->mac_addr.raw[0], tid, failed);
        }
        if (failed)
                return;

        peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
        rx_reorder = &peer->tids_rx_reorder[tid];

        TXRX_ASSERT2(win_sz <= 64);
        rx_reorder->win_sz = win_sz;
        round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
        array_size =
                round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
        rx_reorder->array = qdf_mem_malloc(array_size);
        TXRX_ASSERT1(rx_reorder->array);

        rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
        rx_reorder->num_mpdus = 0;

        peer->tids_next_rel_idx[tid] =
                OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
                                       rx_reorder->win_sz_mask);
}
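
/*
 * Illustrative sizing (assumed values): an ADDBA with win_sz = 5 is
 * rounded up to an 8-element reorder array (win_sz_mask = 7) so that
 * indices can wrap with a mask; win_sz = 64 is the largest value the
 * g_log2ceil table (and the TXRX_ASSERT2 above) supports.
 */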

void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
        rx_reorder = &peer->tids_rx_reorder[tid];

        /* check that there really was a block ack agreement */
        TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
        /*
         * Deallocate the old rx reorder array.
         * The call to ol_rx_reorder_init below will reset rx_reorder->array
         * to point to the single-element statically-allocated reorder array
         * used for non block-ack cases.
         */
        if (rx_reorder->array != &rx_reorder->base) {
                ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
                            __func__, tid);
                qdf_mem_free(rx_reorder->array);
        }

        /* set up the TID with default parameters (ARQ window size = 1) */
        ol_rx_reorder_init(rx_reorder, tid);
}

void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint16_t idx_start,
                    uint16_t idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        int idx;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer)
                vdev = peer->vdev;
        else
                return;

        OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

        idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                rx_desc =
                        htt_rx_msdu_desc_retrieve(htt_pdev,
                                                  rx_reorder_array_elem->head);
                if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
                        ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
                                                 idx_start);
                        /*
                         * Assume the flush message is sent separately for
                         * frags and for normal frames.
                         */
                        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
                        return;
                }
        }

        if (action == htt_rx_flush_release)
                ol_rx_reorder_detect_hole(peer, tid, idx_start);

        ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
        /*
         * If the rx reorder timeout is handled by host SW, see if there are
         * remaining rx holes that require the timer to be restarted.
         */
        OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
                     uint16_t peer_id,
                     uint8_t tid,
                     uint16_t seq_num_start,
                     uint16_t seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        unsigned int win_sz_mask;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;
        uint16_t seq_num;
        int i = 0;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }
        peer = ol_txrx_peer_find_by_id(pdev, peer_id);

        if (!peer) {
                /*
                 * If we can't find a peer, send this packet to the OCB
                 * interface using the OCB self peer.
                 */
                if (!ol_txrx_get_ocb_peer(pdev, &peer))
                        peer = NULL;
        }

        if (peer)
                vdev = peer->vdev;
        else
                return;

        qdf_atomic_set(&peer->fw_pn_check, 1);
        /* TODO: fragmentation case */
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        seq_num_start &= win_sz_mask;
        seq_num_end &= win_sz_mask;
        seq_num = seq_num_start;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[seq_num];

                if (rx_reorder_array_elem->head) {
                        if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
                                qdf_nbuf_t msdu, next_msdu, mpdu_head,
                                           mpdu_tail;
                                /*
                                 * No explicit initializer needed: static
                                 * storage is zero-initialized by C.
                                 */
                                static uint32_t last_pncheck_print_time;
                                uint32_t current_time_ms;
                                union htt_rx_pn_t pn = { 0 };
                                int index, pn_len;

                                mpdu_head = msdu = rx_reorder_array_elem->head;
                                mpdu_tail = rx_reorder_array_elem->tail;

                                pn_ie_cnt--;
                                i++;
                                rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
                                                                    msdu);
                                index = htt_rx_msdu_is_wlan_mcast(
                                        pdev->htt_pdev, rx_desc)
                                        ? txrx_sec_mcast
                                        : txrx_sec_ucast;
                                pn_len = pdev->rx_pn[peer->security[index].
                                                     sec_type].len;
                                htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
                                                    pn_len);

                                current_time_ms = qdf_system_ticks_to_msecs(
                                        qdf_system_ticks());
                                if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
                                    (current_time_ms -
                                     last_pncheck_print_time)) {
                                        last_pncheck_print_time =
                                                current_time_ms;
                                        ol_txrx_warn(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                " new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5],
                                                pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(
                                                        htt_pdev, rx_desc));
                                } else {
                                        ol_txrx_dbg(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                " new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5],
                                                pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(
                                                        htt_pdev, rx_desc));
                                }
                                ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
                                          peer->mac_addr.raw, tid,
                                          htt_rx_mpdu_desc_tsf32(htt_pdev,
                                                                 rx_desc),
                                          OL_RX_ERR_PN, mpdu_head, NULL, 0);

                                /* free all MSDUs within this MPDU */
                                do {
                                        next_msdu = qdf_nbuf_next(msdu);
                                        htt_rx_desc_frame_free(htt_pdev, msdu);
                                        if (msdu == mpdu_tail)
                                                break;
                                        msdu = next_msdu;
                                } while (1);

                        } else {
                                if (head_msdu == NULL) {
                                        head_msdu = rx_reorder_array_elem->head;
                                        tail_msdu = rx_reorder_array_elem->tail;
                                } else {
                                        qdf_nbuf_set_next(
                                                tail_msdu,
                                                rx_reorder_array_elem->head);
                                        tail_msdu = rx_reorder_array_elem->tail;
                                }
                        }
                        rx_reorder_array_elem->head = NULL;
                        rx_reorder_array_elem->tail = NULL;
                }
                seq_num = (seq_num + 1) & win_sz_mask;
        } while (seq_num != seq_num_end);

        if (head_msdu) {
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
}
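
/*
 * Illustrative message (assumed contents): for a PN indication covering
 * seq_num_start = 0 and seq_num_end = 4 with pn_ie_cnt = 1 and
 * pn_ie[] = { 2 }, the MPDUs in slots 0, 1 and 3 are chained and
 * delivered, while the MPDU in slot 2 failed the target's PN check and
 * is freed after being logged and reported via OL_RX_ERR_PN.
 */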

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
        int num_elems;

        num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
        pdev->rx_reorder_trace.idx = 0;
        pdev->rx_reorder_trace.cnt = 0;
        pdev->rx_reorder_trace.mask = num_elems - 1;
        pdev->rx_reorder_trace.data = qdf_mem_malloc(
                sizeof(*pdev->rx_reorder_trace.data) * num_elems);
        if (!pdev->rx_reorder_trace.data)
                return A_NO_MEMORY;

        while (--num_elems >= 0)
                pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

        return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
        qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
                        uint8_t tid,
                        uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
        uint32_t idx = pdev->rx_reorder_trace.idx;

        pdev->rx_reorder_trace.data[idx].tid = tid;
        pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
        pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
        pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
        pdev->rx_reorder_trace.cnt++;
        idx++;
        pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}
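
/*
 * Ring-buffer note (illustrative, assuming TXRX_RX_REORDER_TRACE_SIZE_LOG2
 * is e.g. 8): the trace then holds 256 entries; cnt grows without bound
 * while idx wraps via the mask, and the 0xffff seq_num sentinel written at
 * attach time lets the display routine below detect whether the log has
 * wrapped yet.
 */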

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
        static int print_count;
        uint32_t i, start, end;
        uint64_t cnt;
        int elems;

        if (print_count != 0 && just_once)
                return;

        print_count++;

        end = pdev->rx_reorder_trace.idx;
        if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
                /* trace log has not yet wrapped around - start at the top */
                start = 0;
                cnt = 0;
        } else {
                start = end;
                cnt = pdev->rx_reorder_trace.cnt -
                        (pdev->rx_reorder_trace.mask + 1);
        }
        elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
        if (limit > 0 && elems > limit) {
                int delta;

                delta = elems - limit;
                start += delta;
                start &= pdev->rx_reorder_trace.mask;
                cnt += delta;
        }

        i = start;
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "           log    array  seq");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "   count   idx  tid  idx  num (LSBs)");
        do {
                uint16_t seq_num, reorder_idx;

                seq_num = pdev->rx_reorder_trace.data[i].seq_num;
                reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
                if (seq_num < (1 << 14)) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d  %3d  %4d  %4d (%d)",
                                  cnt, i, pdev->rx_reorder_trace.data[i].tid,
                                  reorder_idx, seq_num, seq_num & 63);
                } else {
                        int err = TXRX_SEQ_NUM_ERR(seq_num);

                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d    err %d (%d MPDUs)",
                                  cnt, i, err,
                                  pdev->rx_reorder_trace.data[i].num_mpdus);
                }
                cnt++;
                i++;
                i &= pdev->rx_reorder_trace.mask;
        } while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */