/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_mem.h>		/* qdf_mem_malloc */

/* external interfaces */
#include <ol_txrx_api.h>	/* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>	/* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>	/* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>	/* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>	/* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>	/* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/

/*---*/

/*=== global variables ===*/

/*---*/

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr) /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr) /* n/a */

/*---*/

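/*
 * Worked example for OL_RX_REORDER_IDX_WRAP (illustrative numbers, not in
 * the original): with a 64-entry block-ack window, win_sz_mask = 63, so an
 * incremented index of 64 wraps to 64 & 63 = 0. The mask form assumes the
 * reorder array size is a power of two.
 */
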
/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do { \
		if (tail_msdu) { \
			qdf_nbuf_set_next(tail_msdu, \
					  rx_reorder_array_elem->head); \
		} \
	} while (0)

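/*
 * Usage note (added for clarity): OL_RX_REORDER_LIST_APPEND only links the
 * current tail to the next slot's head; the caller must then advance its
 * own tail pointer, as ol_rx_reorder_release() does:
 *
 *	OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem);
 *	tail_msdu = rx_reorder_array_elem->tail;
 */
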
/* functions called by txrx components */

void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

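/*
 * Note (added for clarity): win_sz = 1 with win_sz_mask = 0 collapses the
 * reorder array to the single built-in "base" element, i.e. no reordering
 * happens for a TID until a larger block-ack window is installed for it.
 */
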
static enum htt_rx_status
ol_rx_reorder_seq_num_check(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	unsigned int tid, unsigned int seq_num)
{
	unsigned int seq_num_delta;

	/*
	 * Don't check the new seq_num against last_seq if last_seq
	 * is not valid.
	 */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with the retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);	/* account for wraparound */

	if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
		return htt_rx_status_err_replay;
		/* or maybe htt_rx_status_err_dup */
	}
	return htt_rx_status_ok;
}

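/*
 * Worked example for the wraparound arithmetic above (illustrative
 * numbers): with last_seq = 4094 and an in-order new seq_num = 2,
 * seq_num_delta = (2 - 1 - 4094) & 4095 = 3, which is accepted. With a
 * repeated seq_num = 4094, seq_num_delta = (4094 - 1 - 4094) & 4095 =
 * 4095 > 2048, so the frame is rejected as a replay.
 */
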
/**
 * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
 *                         duplicate detection & check for out-of-order
 *                         packets for unicast packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 * 1) For Multicast Frames -- does duplicate detection
 *    A frame is considered duplicate & dropped if it has a seq. number
 *    which is received twice in succession and with the retry bit set
 *    in the second case.
 *    A frame which is older than the last sequence number received
 *    is not considered duplicate but out-of-order. This function does
 *    not perform an out-of-order check for multicast frames, which is
 *    in keeping with section 9.3.2.10 of the 802.11-2012 spec.
 * 2) For Unicast Frames -- does duplicate detection & out-of-order check
 *    only for non-aggregation tids.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *	   htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid,
		    void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = false;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

	/* For mcast packets, we do only dup-detection, not the re-order check */

	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* Invalid packet TID, expected only for HL */
		/* Pass the packet on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, we define frames to be duplicate if they
		 * arrive "ONLY" in succession with the same sequence number
		 * and the last one has the retry bit set. For an older
		 * frame, we consider that as an out-of-order frame, and
		 * hence do not perform the dup-detection or out-of-order
		 * check for multicast frames as per discussions & spec.
		 * Hence the "seq_num <= last_seq_num" check is not necessary.
		 */
		if (qdf_unlikely(retry &&
				 (seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		}

		/*
		 * This is a multicast packet likely to be passed on...
		 * Set the mcast last seq number here.
		 * This is fairly accurate since:
		 * a) f/w sends multicast as separate PPDU/HTT messages
		 * b) mcast packets are not aggregated & hence single
		 * c) result of b) is that the flush / release bit is always
		 *    set on mcast packets, so they are likely to be released
		 *    immediately
		 */
		peer->tids_mcast_last_seq[pkt_tid] = seq_num;
		return htt_rx_status_ok;
	} else
		return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
}

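/*
 * Illustrative trace of the mcast dup-detection above (hypothetical
 * numbers): seq 100 followed by seq 100 with the retry bit set is dropped
 * as a replay; seq 100 followed by seq 99 (an older frame) is passed up,
 * and tids_mcast_last_seq simply becomes 99.
 */
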
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx, qdf_nbuf_t head_msdu,
		    qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}

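/*
 * Illustrative example (hypothetical numbers): with a 64-entry block-ack
 * window (win_sz_mask = 63), an MPDU arriving with reorder index 130 is
 * stored in slot 130 & 63 = 2. If the slot is already occupied, the new
 * MSDU list is chained onto the existing tail rather than overwriting it.
 */
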
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned int tid, unsigned int idx_start,
		      unsigned int idx_end)
{
	unsigned int idx;
	unsigned int win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	if (head_msdu)
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

	idx = (idx_start + 1);
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
			NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather
		 * than the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

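/*
 * Note on the release window above (illustrative numbers): idx_end is
 * exclusive, so with idx_start = 2 and idx_end = 5 the slots 2, 3 and 4
 * are released as one NULL-terminated MSDU chain, and tids_next_rel_idx
 * becomes 5.
 */
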
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx_start,
		    unsigned int idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned int win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in its entirety because the
		 * block ack window has been shifted to a new position that
		 * does not overlap with the old position. (Or due to
		 * reception of a DELBA.)
		 * Thus, since the block ack window is essentially being
		 * reset, reset the "next release index".
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /*n/a*/, win_sz, win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (head_msdu == NULL) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;	/* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

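/*
 * Added note: with action == htt_rx_flush_discard the collected chain is
 * freed frame by frame instead of being handed to rx_opt_proc; this is
 * the path ol_rx_reorder_peer_cleanup() below takes for every TID.
 */
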
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned int tid, unsigned int *idx_end)
{
	unsigned int win_sz, win_sz_mask;
	unsigned int idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}

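/*
 * Worked example (hypothetical window contents): with idx_start = 0 and
 * slots [hole, hole, frame, frame, hole, ...], the first loop scans from
 * slot 1 past the initial hole to slot 2, the second loop scans past the
 * run of frames to slot 4, and *idx_end = 4 - the exclusive end of the
 * frames behind the first hole.
 */
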
#ifdef HL_RX_AGGREGATION_HOLE_DETECTION

/**
 * ol_rx_reorder_detect_hole - ol rx reorder detect hole
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	uint32_t win_sz_mask, next_rel_idx, hole_size;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
		return;
	}

	if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
		return;

	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	/* return directly if block-ack is not enabled */
	if (win_sz_mask == 0)
		return;

	idx_start &= win_sz_mask;
	next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;

	if (idx_start != next_rel_idx) {
		hole_size = ((int)idx_start - (int)next_rel_idx) & win_sz_mask;

		ol_rx_aggregation_hole(hole_size);
	}
}

#else

/**
 * ol_rx_reorder_detect_hole - ol rx reorder detect hole
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	/* no-op */
}

#endif

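/*
 * Worked example for the hole-size arithmetic in the HL variant above
 * (hypothetical numbers): with win_sz_mask = 63, next_rel_idx = 60 and
 * idx_start = 2, hole_size = (2 - 60) & 63 = 6 - the number of slots
 * skipped between the expected and the actual release index, wraparound
 * included.
 */
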
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming the flush message is sent separately for
			 * frags and for normal frames.
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}

	if (action == htt_rx_flush_release)
		ol_rx_reorder_detect_hole(peer, tid, idx_start);

	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

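/*
 * Added note: ol_rx_reorder_detect_hole() is invoked above only for the
 * htt_rx_flush_release action; discard-type flushes (such as those issued
 * from ol_rx_reorder_peer_cleanup()) bypass the hole accounting.
 */
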
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     uint16_t seq_num_start,
		     uint16_t seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned int win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	uint16_t seq_num;
	int i = 0;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
		WARN_ON(1);
		return;
	}
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				static uint32_t last_pncheck_print_time;
				/* no init needed - C zero-initializes statics */

				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					ol_txrx_warn(
						"Tgt PN check failed - TID %d, peer %pK "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"   new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				} else {
					ol_txrx_dbg(
						"Tgt PN check failed - TID %d, peer %pK "
						"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
						"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
						"   new seq num = %d\n",
						tid, peer,
						peer->mac_addr.raw[0],
						peer->mac_addr.raw[1],
						peer->mac_addr.raw[2],
						peer->mac_addr.raw[3],
						peer->mac_addr.raw[4],
						peer->mac_addr.raw[5],
						pn.pn128[1],
						pn.pn128[0],
						pn.pn128[0] & 0xffffffffffffULL,
						htt_rx_mpdu_desc_seq_num(htt_pdev,
									 rx_desc));
				}
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					msdu = next_msdu;
				} while (1);

			} else {
				if (head_msdu == NULL) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}

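/*
 * Added note (illustrative): pn_ie lists the reorder-window indices whose
 * MPDUs failed the target's PN check. For example, pn_ie_cnt = 1 with
 * pn_ie[0] = 3 means the MPDU in slot 3 is logged, reported via
 * ol_rx_err() and freed, while every other slot in
 * [seq_num_start, seq_num_end) is chained and passed up intact.
 */
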
#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

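/*
 * Added note (illustrative): the trace log is a power-of-two ring buffer.
 * With TXRX_RX_REORDER_TRACE_SIZE_LOG2 = 8 (a hypothetical value) it holds
 * 256 entries; idx wraps via "& mask", while cnt grows monotonically so
 * the display routine below can tell whether the ring has wrapped (an
 * unused entry still carries the 0xffff seq_num sentinel set at attach).
 */
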
void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "                     log       array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count   idx  tid   idx  num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d   %3d  %4d  %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d   err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */