/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>               /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>                /* qdf_mem_malloc */

/* external interfaces */
#include <ol_txrx_api.h>            /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>        /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>       /* ol_ctrl_rx_addba_complete */
#include <ol_htt_rx_api.h>          /* htt_rx_desc_frame_free */
#include <ol_ctrl_txrx_api.h>       /* ol_rx_err */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>      /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>       /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>  /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

static char g_log2ceil[] = {
        1,                      /* 0 -> 1 */
        1,                      /* 1 -> 1 */
        2,                      /* 2 -> 2 */
        4, 4,                   /* 3-4 -> 4 */
        8, 8, 8, 8,             /* 5-8 -> 8 */
        16, 16, 16, 16, 16, 16, 16, 16, /* 9-16 -> 16 */
        32, 32, 32, 32, 32, 32, 32, 32,
        32, 32, 32, 32, 32, 32, 32, 32, /* 17-32 -> 32 */
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, /* 33-64 -> 64 */
};
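
/*
 * Illustrative note: g_log2ceil maps a block-ack window size (0-64) to the
 * next power of two, e.g. g_log2ceil[5] = 8.  OL_RX_REORDER_ROUND_PWR2()
 * simply indexes this table, so the reorder array size is always a power of
 * two and index wraparound can use a bitmask instead of a modulo.
 */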

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
        do {                                                               \
                if (tail_msdu) {                                           \
                        qdf_nbuf_set_next(tail_msdu,                       \
                                          rx_reorder_array_elem->head);   \
                }                                                          \
        } while (0)

/* functions called by txrx components */

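/**
 * ol_rx_reorder_init() - reset a per-TID rx reorder state to its
 *                        non-aggregation defaults
 * @rx_reorder: rx reorder state to initialize
 * @tid: traffic ID this state belongs to
 *
 * Until a block-ack agreement is established, the TID uses a single-slot
 * window: win_sz is 1, win_sz_mask is 0, and the array pointer refers to
 * the statically allocated single element embedded in the structure.
 */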
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
        rx_reorder->win_sz = 1;
        rx_reorder->win_sz_mask = 0;
        rx_reorder->array = &rx_reorder->base;
        rx_reorder->base.head = rx_reorder->base.tail = NULL;
        rx_reorder->tid = tid;
        rx_reorder->defrag_timeout_ms = 0;

        rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
        rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

static enum htt_rx_status
ol_rx_reorder_seq_num_check(
        struct ol_txrx_pdev_t *pdev,
        struct ol_txrx_peer_t *peer,
        unsigned int tid, unsigned int seq_num)
{
        unsigned int seq_num_delta;

        /*
         * Don't check the new seq_num against last_seq
         * if last_seq is not valid.
         */
        if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
                return htt_rx_status_ok;

        /*
         * For duplicate detection, it might be helpful to also check
         * whether the retry bit is set or not - a strict duplicate packet
         * should be the one with the retry bit set.
         * However, since many implementations do not set the retry bit,
         * and since this same function is also used for filtering out
         * late-arriving frames (frames that arrive after their rx reorder
         * timeout has expired) which are not retries, don't bother checking
         * the retry bit for now.
         */
        /* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
        seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
                (IEEE80211_SEQ_MAX - 1); /* account for wraparound */
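
        /*
         * Worked example (assuming IEEE80211_SEQ_MAX is 4096, as the
         * wraparound note above implies): with last_seq = 100, a repeated
         * seq_num of 100 gives (100 - 1 - 100) & 4095 = 4095, which exceeds
         * half the sequence space and is treated as a replay; the next
         * in-order seq_num of 101 gives a delta of 0 and passes the check.
         */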

        if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
                return htt_rx_status_err_replay;
                /* or maybe htt_rx_status_err_dup */
        }
        return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Detect duplicates for multicast packets, and
 *                         detect both duplicates and out-of-order frames
 *                         for unicast packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *      1) For Multicast Frames -- does duplicate detection
 *              A frame is considered a duplicate and dropped if it has a
 *              sequence number which is received twice in succession, with
 *              the retry bit set in the second case.
 *              A frame which is older than the last sequence number received
 *              is not considered a duplicate but out-of-order. This function
 *              does not perform an out-of-order check for multicast frames,
 *              in keeping with IEEE 802.11-2012 section 9.3.2.10.
 *      2) For Unicast Frames -- does duplicate detection and out-of-order
 *              checking, but only for non-aggregation TIDs.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *         htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    uint8_t tid,
                    void *rx_mpdu_desc)
{
        uint16_t pkt_tid = 0xffff;
        uint16_t seq_num = IEEE80211_SEQ_MAX;
        bool retry = false;

        seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

        /* for mcast packets, we only do dup-detection, not the re-order check */

        if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {
                pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

                /* Invalid packet TID, expected only for HL */
                /* Pass the packet on */
                if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
                        return htt_rx_status_ok;

                retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

                /*
                 * At this point, we define frames to be duplicates only if
                 * they arrive in succession with the same sequence number
                 * and the last one has the retry bit set. An older frame is
                 * considered out-of-order rather than duplicate, and no
                 * dup-detection or out-of-order check is performed for such
                 * multicast frames, as per discussions & spec.
                 * Hence the "seq_num <= last_seq_num" check is not necessary.
                 */
                if (qdf_unlikely(retry &&
                                 (seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
                        /* drop mcast */
                        TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
                        return htt_rx_status_err_replay;
                }

                /*
                 * This is a multicast packet that is likely to be passed on.
                 * Set the mcast last seq number here.
                 * This is fairly accurate since:
                 * a) f/w sends multicast as separate PPDU/HTT messages
                 * b) mcast packets are not aggregated and hence arrive singly
                 * c) as a result of b), the flush / release bit is always set
                 *    on mcast packets, so they are likely to be released
                 *    immediately
                 */
                peer->tids_mcast_last_seq[pkt_tid] = seq_num;
                return htt_rx_status_ok;
        } else {
                return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
        }
}


void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx, qdf_nbuf_t head_msdu,
                    qdf_nbuf_t tail_msdu)
{
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

        idx &= peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
        } else {
                rx_reorder_array_elem->head = head_msdu;
                OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
        }
        rx_reorder_array_elem->tail = tail_msdu;
}
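
/*
 * Illustrative note: the caller passes the raw reorder index reported by the
 * target; with a 64-entry window (win_sz_mask = 63) an index of 130 is
 * stored in slot 130 & 63 = 2.  MSDUs of an MPDU that lands in an occupied
 * slot are simply chained onto that slot's existing list.
 */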

void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
                      struct ol_txrx_peer_t *peer,
                      unsigned int tid, unsigned int idx_start,
                      unsigned int idx_end)
{
        unsigned int idx;
        unsigned int win_sz, win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu;
        qdf_nbuf_t tail_msdu;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* may get reset below */
        peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

        head_msdu = rx_reorder_array_elem->head;
        tail_msdu = rx_reorder_array_elem->tail;
        rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        if (head_msdu)
                OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

        idx = (idx_start + 1);
        OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        while (idx != idx_end) {
                rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
                                                  rx_reorder_array_elem);
                        tail_msdu = rx_reorder_array_elem->tail;
                }
                rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
                        NULL;
                idx++;
                OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        }
        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                /*
                 * This logic is not quite correct - the last_seq value should
                 * be the sequence number of the final MPDU released rather
                 * than the initial MPDU released.
                 * However, tracking the sequence number of the first MPDU in
                 * the released batch works well enough:
                 * For Peregrine and Rome, the last_seq is checked only for
                 * non-aggregate cases, where only one MPDU at a time is
                 * released.
                 * For Riva, Pronto, and Northstar, the last_seq is checked to
                 * filter out late-arriving rx frames, whose sequence number
                 * will be less than the first MPDU in this release batch.
                 */
                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev,
                                                  head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
        /*
         * If the rx reorder timeout is handled by host SW rather than the
         * target's rx reorder logic, then stop the timer here.
         * (If there are remaining rx holes, then the timer will be restarted.)
         */
        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx_start,
                    unsigned int idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_pdev_t *pdev;
        unsigned int win_sz;
        uint8_t win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;

        pdev = vdev->pdev;
        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* an idx_end value of 0xffff means to flush the entire array */
        if (idx_end == 0xffff) {
                idx_end = idx_start;
                /*
                 * The array is being flushed in its entirety because the
                 * block ack window has been shifted to a new position that
                 * does not overlap with the old position. (Or due to
                 * reception of a DELBA.)
                 * Thus, since the block ack window is essentially being
                 * reset, reset the "next release index".
                 */
                peer->tids_next_rel_idx[tid] =
                        OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
        } else {
                peer->tids_next_rel_idx[tid] = (uint16_t)idx_end;
        }

        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[idx_start];
                idx_start = (idx_start + 1);
                OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        if (head_msdu == NULL) {
                                head_msdu = rx_reorder_array_elem->head;
                                tail_msdu = rx_reorder_array_elem->tail;
                                rx_reorder_array_elem->head = NULL;
                                rx_reorder_array_elem->tail = NULL;
                                continue;
                        }
                        qdf_nbuf_set_next(tail_msdu,
                                          rx_reorder_array_elem->head);
                        tail_msdu = rx_reorder_array_elem->tail;
                        rx_reorder_array_elem->head =
                                rx_reorder_array_elem->tail = NULL;
                }
        } while (idx_start != idx_end);

        ol_rx_defrag_waitlist_remove(peer, tid);

        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                if (action == htt_rx_flush_release) {
                        peer->rx_opt_proc(vdev, peer, tid, head_msdu);
                } else {
                        do {
                                qdf_nbuf_t next;

                                next = qdf_nbuf_next(head_msdu);
                                htt_rx_desc_frame_free(pdev->htt_pdev,
                                                       head_msdu);
                                head_msdu = next;
                        } while (head_msdu);
                }
        }
        /*
         * If the rx reorder array is empty, then reset the last_seq value -
         * it is likely that a BAR or a sequence number shift caused the
         * sequence number to jump, so the old last_seq value is not relevant.
         */
        if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
                peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
                         unsigned int tid, unsigned int *idx_end)
{
        unsigned int win_sz, win_sz_mask;
        unsigned int idx_start = 0, tmp_idx = 0;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        tmp_idx++;
        OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        /* bypass the initial hole */
        while (tmp_idx != idx_start &&
               !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /* bypass the present frames following the initial hole */
        while (tmp_idx != idx_start &&
               peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /*
         * idx_end is exclusive rather than inclusive.
         * In other words, it is the index of the first slot of the second
         * hole, rather than the index of the final present frame following
         * the first hole.
         */
        *idx_end = tmp_idx;
}
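
/*
 * Illustrative example: with an 8-slot window where slot 0 is empty,
 * slots 1 and 2 hold frames, and slot 3 is empty again, the scan above
 * skips the initial hole, walks past the frames in slots 1-2, and reports
 * *idx_end = 3 (the exclusive end, i.e. the start of the second hole).
 */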
452
lifeng74c9a6d2017-02-22 15:15:38 +0800453#ifdef HL_RX_AGGREGATION_HOLE_DETECTION
454
455/**
456 * ol_rx_reorder_detect_hole - ol rx reorder detect hole
457 * @peer: ol_txrx_peer_t
458 * @tid: tid
459 * @idx_start: idx_start
460 *
461 * Return: void
462 */
463static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
464 uint32_t tid,
465 uint32_t idx_start)
466{
467 uint32_t win_sz_mask, next_rel_idx, hole_size;
468
469 if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
470 return;
471
472 win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
473 /* Return directly if block-ack not enable */
474 if (win_sz_mask == 0)
475 return;
476
477 idx_start &= win_sz_mask;
478 next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;
479
480 if (idx_start != next_rel_idx) {
481 hole_size = ((int)idx_start - (int)next_rel_idx) & win_sz_mask;
482
483 ol_rx_aggregation_hole(hole_size);
484 }
485
486 return;
487}
488
489#else
490
491/**
492 * ol_rx_reorder_detect_hole - ol rx reorder detect hole
493 * @peer: ol_txrx_peer_t
494 * @tid: tid
495 * @idx_start: idx_start
496 *
497 * Return: void
498 */
499static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
500 uint32_t tid,
501 uint32_t idx_start)
502{
503 /* no-op */
504}
505
506#endif
507
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
                           struct ol_txrx_peer_t *peer)
{
        int tid;

        for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
                ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
                                    htt_rx_flush_discard);
        }
        OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
        uint8_t round_pwr2_win_sz;
        unsigned int array_size;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        if (pdev->cfg.host_addba) {
                ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
                                          &peer->mac_addr.raw[0], tid, failed);
        }
        if (failed)
                return;

        peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;  /* invalid */
        rx_reorder = &peer->tids_rx_reorder[tid];

        TXRX_ASSERT2(win_sz <= 64);
        rx_reorder->win_sz = win_sz;
        round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
        array_size =
                round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
        rx_reorder->array = qdf_mem_malloc(array_size);
        TXRX_ASSERT1(rx_reorder->array);

        rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
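
        /*
         * Example (illustrative only): an ADDBA with win_sz = 48 rounds up
         * to a 64-entry array, so win_sz_mask = 63 and reorder indices are
         * folded into the array with "idx & 63".
         */
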
        rx_reorder->num_mpdus = 0;

        peer->tids_next_rel_idx[tid] =
                OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
                                       rx_reorder->win_sz_mask);
}

void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
        rx_reorder = &peer->tids_rx_reorder[tid];

        /* check that there really was a block ack agreement */
        TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
        /*
         * Deallocate the old rx reorder array.
         * The call to ol_rx_reorder_init below
         * will reset rx_reorder->array to point to
         * the single-element statically-allocated reorder array
         * used for non block-ack cases.
         */
        if (rx_reorder->array != &rx_reorder->base) {
                ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
                            __func__, tid);
                qdf_mem_free(rx_reorder->array);
        }

        /* set up the TID with default parameters (ARQ window size = 1) */
        ol_rx_reorder_init(rx_reorder, tid);
}

void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint16_t idx_start,
                    uint16_t idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        int idx;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                return;
        }

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer)
                vdev = peer->vdev;
        else
                return;

        OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

        idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                rx_desc =
                        htt_rx_msdu_desc_retrieve(htt_pdev,
                                                  rx_reorder_array_elem->head);
                if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
                        ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
                                                 idx_start);
                        /*
                         * Assuming a flush message is sent separately for
                         * frags and for normal frames
                         */
                        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
                        return;
                }
        }

        if (action == htt_rx_flush_release)
                ol_rx_reorder_detect_hole(peer, tid, idx_start);

        ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
        /*
         * If the rx reorder timeout is handled by host SW, see if there are
         * remaining rx holes that require the timer to be restarted.
         */
        OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
                     uint16_t peer_id,
                     uint8_t tid,
                     uint16_t seq_num_start,
                     uint16_t seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        unsigned int win_sz_mask;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;
        uint16_t seq_num;
        int i = 0;

        if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
                WARN_ON(1);
                return;
        }
        peer = ol_txrx_peer_find_by_id(pdev, peer_id);

        if (!peer) {
                /*
                 * If we can't find a peer, send this packet to the OCB
                 * interface using the OCB self peer
                 */
                if (!ol_txrx_get_ocb_peer(pdev, &peer))
                        peer = NULL;
        }

        if (peer)
                vdev = peer->vdev;
        else
                return;

        qdf_atomic_set(&peer->fw_pn_check, 1);
        /* TODO: Fragmentation case */
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        seq_num_start &= win_sz_mask;
        seq_num_end &= win_sz_mask;
        seq_num = seq_num_start;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[seq_num];

                if (rx_reorder_array_elem->head) {
                        if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
                                qdf_nbuf_t msdu, next_msdu, mpdu_head,
                                           mpdu_tail;
                                static uint32_t last_pncheck_print_time;
                                /* static, so implicitly zero-initialized */

                                uint32_t current_time_ms;
                                union htt_rx_pn_t pn = { 0 };
                                int index, pn_len;

                                mpdu_head = msdu = rx_reorder_array_elem->head;
                                mpdu_tail = rx_reorder_array_elem->tail;

                                pn_ie_cnt--;
                                i++;
                                rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
                                                                    msdu);
                                index = htt_rx_msdu_is_wlan_mcast(
                                        pdev->htt_pdev, rx_desc)
                                        ? txrx_sec_mcast
                                        : txrx_sec_ucast;
                                pn_len = pdev->rx_pn[peer->security[index].
                                                     sec_type].len;
                                htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
                                                    pn_len);

                                current_time_ms = qdf_system_ticks_to_msecs(
                                        qdf_system_ticks());
                                if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
                                    (current_time_ms -
                                     last_pncheck_print_time)) {
                                        last_pncheck_print_time =
                                                current_time_ms;
                                        ol_txrx_warn(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                "   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                "   new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5], pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(htt_pdev,
                                                                         rx_desc));
                                } else {
                                        ol_txrx_dbg(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                "   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                "   new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5], pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(htt_pdev,
                                                                         rx_desc));
                                }
                                ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
                                          peer->mac_addr.raw, tid,
                                          htt_rx_mpdu_desc_tsf32(htt_pdev,
                                                                 rx_desc),
                                          OL_RX_ERR_PN, mpdu_head, NULL, 0);

                                /* free all MSDUs within this MPDU */
                                do {
                                        next_msdu = qdf_nbuf_next(msdu);
                                        htt_rx_desc_frame_free(htt_pdev, msdu);
                                        if (msdu == mpdu_tail)
                                                break;
                                        msdu = next_msdu;
                                } while (1);

                        } else {
                                if (head_msdu == NULL) {
                                        head_msdu = rx_reorder_array_elem->head;
                                        tail_msdu = rx_reorder_array_elem->tail;
                                } else {
                                        qdf_nbuf_set_next(
                                                tail_msdu,
                                                rx_reorder_array_elem->head);
                                        tail_msdu = rx_reorder_array_elem->tail;
                                }
                        }
                        rx_reorder_array_elem->head = NULL;
                        rx_reorder_array_elem->tail = NULL;
                }
                seq_num = (seq_num + 1) & win_sz_mask;
        } while (seq_num != seq_num_end);

        if (head_msdu) {
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
}

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
        int num_elems;

        num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
        pdev->rx_reorder_trace.idx = 0;
        pdev->rx_reorder_trace.cnt = 0;
        pdev->rx_reorder_trace.mask = num_elems - 1;
        pdev->rx_reorder_trace.data = qdf_mem_malloc(
                sizeof(*pdev->rx_reorder_trace.data) * num_elems);
        if (!pdev->rx_reorder_trace.data)
                return A_NO_MEMORY;

        while (--num_elems >= 0)
                pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

        return A_OK;
}
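
/*
 * Note (illustrative): the trace buffer is a power-of-two ring; with
 * TXRX_RX_REORDER_TRACE_SIZE_LOG2 = 8 (a hypothetical build-time value)
 * there are 256 entries and mask = 255, so trace indices wrap with a simple
 * "idx & mask" in ol_rx_reorder_trace_add().  Entries are primed with the
 * invalid seq_num 0xffff so the display routine can tell whether the log
 * has wrapped around yet.
 */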

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
        qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
                        uint8_t tid,
                        uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
        uint32_t idx = pdev->rx_reorder_trace.idx;

        pdev->rx_reorder_trace.data[idx].tid = tid;
        pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
        pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
        pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
        pdev->rx_reorder_trace.cnt++;
        idx++;
        pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
        static int print_count;
        uint32_t i, start, end;
        uint64_t cnt;
        int elems;

        if (print_count != 0 && just_once)
                return;

        print_count++;

        end = pdev->rx_reorder_trace.idx;
        if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
                /* trace log has not yet wrapped around - start at the top */
                start = 0;
                cnt = 0;
        } else {
                start = end;
                cnt = pdev->rx_reorder_trace.cnt -
                        (pdev->rx_reorder_trace.mask + 1);
        }
        elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
        if (limit > 0 && elems > limit) {
                int delta;

                delta = elems - limit;
                start += delta;
                start &= pdev->rx_reorder_trace.mask;
                cnt += delta;
        }

        i = start;
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "                     log   array seq");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "   count   idx  tid   idx  num (LSBs)");
        do {
                uint16_t seq_num, reorder_idx;

                seq_num = pdev->rx_reorder_trace.data[i].seq_num;
                reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
                if (seq_num < (1 << 14)) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d  %3d  %4d  %4d (%d)",
                                  cnt, i, pdev->rx_reorder_trace.data[i].tid,
                                  reorder_idx, seq_num, seq_num & 63);
                } else {
                        int err = TXRX_SEQ_NUM_ERR(seq_num);

                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  "  %6lld  %4d    err %d (%d MPDUs)",
                                  cnt, i, err,
                                  pdev->rx_reorder_trace.data[i].num_mpdus);
                }
                cnt++;
                i++;
                i &= pdev->rx_reorder_trace.mask;
        } while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */