/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc */

#include <linux/ieee80211.h>    /* IEEE80211_SEQ_MAX */

/* external interfaces */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>    /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_rx_addba_complete */
#include <ol_htt_rx_api.h>      /* htt_rx_desc_frame_free */
#include <ol_ctrl_txrx_api.h>   /* ol_rx_err */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

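/*
 * Table used by OL_RX_REORDER_ROUND_PWR2 to round a block-ack window
 * size (up to 64) up to the next power of two, so the reorder array
 * length is a power of two and index wraparound reduces to a bitmask.
 * For example, a negotiated win_sz of 10 yields an array of 16 elements
 * and a win_sz_mask of 15.
 */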
static char g_log2ceil[] = {
	1,                      /* 0 -> 1 */
	1,                      /* 1 -> 1 */
	2,                      /* 2 -> 2 */
	4, 4,                   /* 3-4 -> 4 */
	8, 8, 8, 8,             /* 5-8 -> 8 */
	16, 16, 16, 16, 16, 16, 16, 16, /* 9-16 -> 16 */
	32, 32, 32, 32, 32, 32, 32, 32,
	32, 32, 32, 32, 32, 32, 32, 32, /* 17-32 -> 32 */
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64, /* 33-64 -> 64 */
};

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do {								\
		if (tail_msdu) {					\
			qdf_nbuf_set_next(tail_msdu,			\
					  rx_reorder_array_elem->head); \
		}							\
	} while (0)

/* functions called by txrx components */

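/**
 * ol_rx_reorder_init() - initialize a TID's rx reorder state for
 *	non-aggregation operation
 * @rx_reorder: rx reorder state to initialize
 * @tid: extended TID this state belongs to
 *
 * Until a block-ack session is set up, the reorder "array" is just the
 * single statically-allocated base element, the window size is 1 and the
 * window mask is 0, so every MPDU maps to slot 0.
 *
 * Return: void
 */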
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

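/**
 * ol_rx_reorder_seq_num_check() - filter replayed or late unicast MPDUs
 * @pdev: txrx physical device
 * @peer: peer the MPDU was received from
 * @tid: extended TID of the MPDU
 * @seq_num: 12-bit sequence number of the new MPDU
 *
 * The delta from the last released sequence number is computed modulo
 * 4096; deltas in the upper half of the sequence space are treated as
 * replays or late arrivals.  For example, with last_seq = 100 and a new
 * seq_num of 100, the delta is (100 - 1 - 100) & 4095 = 4095 > 2048, so
 * the frame is flagged as a replay.
 *
 * Return: htt_rx_status_err_replay to drop, htt_rx_status_ok otherwise
 */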
static enum htt_rx_status
ol_rx_reorder_seq_num_check(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	unsigned int tid, unsigned int seq_num)
{
	unsigned int seq_num_delta;

	/* don't check the new seq_num against last_seq
	   if last_seq is not valid */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);        /* account for wraparound */

	if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
		return htt_rx_status_err_replay;
		/* or maybe htt_rx_status_err_dup */
	}
	return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
 *                         duplicate detection & check for out-of-order
 *                         packets for unicast packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 * 1) For Multicast Frames -- does duplicate detection
 *    A frame is considered a duplicate & dropped if it has a seq. number
 *    which is received twice in succession, with the retry bit set
 *    in the second case.
 *    A frame which is older than the last sequence number received
 *    is not considered a duplicate but out-of-order. This function does
 *    not perform an out-of-order check for multicast frames, which is
 *    in keeping with the 802.11 2012 spec section 9.3.2.10
 * 2) For Unicast Frames -- does duplicate detection & out-of-order check
 *    only for non-aggregation tids.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *         htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid,
		    void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = 0;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

	/* For mcast packets, do only dup-detection, not the re-order check */

	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* Invalid packet TID, expected only for HL */
		/* Pass the packet on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, we define a frame to be a duplicate only if
		 * it arrives immediately after a frame with the same sequence
		 * number and has the retry bit set. An older frame is treated
		 * as out-of-order rather than as a duplicate, and per
		 * discussions & the spec we do not perform a dup-detection or
		 * out-of-order check for such multicast frames.
		 * Hence the "seq_num <= last_seq_num" check is not necessary.
		 */
		if (qdf_unlikely(retry &&
			(seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		}

		/*
		 * This is a multicast packet likely to be passed on...
		 * Set the mcast last seq number here
		 * This is fairly accurate since:
		 * a) f/w sends multicast as separate PPDU/HTT messages
		 * b) Mcast packets are not aggregated & hence single
		 * c) Result of b) is that the flush / release bit is always
		 *    set on mcast packets, so they are likely to be
		 *    immediately released.
		 */
		peer->tids_mcast_last_seq[pkt_tid] = seq_num;
		return htt_rx_status_ok;
	} else
		return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
}

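/**
 * ol_rx_reorder_store() - buffer an MPDU's MSDU list in the reorder array
 * @pdev: txrx physical device
 * @peer: peer the MPDU was received from
 * @tid: extended TID of the MPDU
 * @idx: reorder index for the MPDU (based on its sequence number)
 * @head_msdu: first MSDU of the MPDU
 * @tail_msdu: last MSDU of the MPDU
 *
 * The index is masked by the window size; if the slot already holds
 * MSDUs, the new list is chained onto the existing tail rather than
 * replacing it.
 *
 * Return: void
 */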
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx, qdf_nbuf_t head_msdu,
		    qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}

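/**
 * ol_rx_reorder_release() - release in-order MPDUs to the rx handler
 * @vdev: virtual device the peer belongs to
 * @peer: peer whose reorder array is being released
 * @tid: extended TID being released
 * @idx_start: first reorder index to release
 * @idx_end: index one past the last slot to release (exclusive)
 *
 * Buffered MSDU lists from idx_start up to, but not including, idx_end
 * are linked into a single NULL-terminated chain and handed to
 * peer->rx_opt_proc.  The last released sequence number and the next
 * release index are updated, and the host rx reorder timeout (if used)
 * is stopped for this TID.
 *
 * Return: void
 */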
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned int tid, unsigned int idx_start,
		      unsigned int idx_end)
{
	unsigned int idx;
	unsigned int win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	if (head_msdu)
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

	idx = (idx_start + 1);
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
			NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather
		 * than the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

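/**
 * ol_rx_reorder_flush() - flush part or all of a TID's rx reorder array
 * @vdev: virtual device the peer belongs to
 * @peer: peer whose reorder array is being flushed
 * @tid: extended TID being flushed
 * @idx_start: first reorder index to flush
 * @idx_end: end of the flush range (exclusive), or 0xffff to flush the
 *	entire array
 * @action: htt_rx_flush_release to deliver the buffered frames to the
 *	rx handler, or htt_rx_flush_discard to free them
 *
 * Also removes the TID from the defrag waitlist and stops the host rx
 * reorder timeout for this TID.
 *
 * Return: void
 */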
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx_start,
		    unsigned int idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned int win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in entirety because the block
		 * ack window has been shifted to a new position that does not
		 * overlap with the old position. (Or due to reception of a
		 * DELBA.)
		 * Thus, since the block ack window is essentially being reset,
		 * reset the "next release index".
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /* n/a */, win_sz, win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (head_msdu == NULL) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

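/**
 * ol_rx_reorder_first_hole() - find the end of the first in-order run
 * @peer: peer whose reorder array is inspected
 * @tid: extended TID to inspect
 * @idx_end: filled with the index of the first slot of the second hole
 *
 * Starting just past the window's start index, skip the initial hole and
 * then the run of buffered frames that follows it.  The returned index
 * is exclusive: it points at the next missing slot, not at the last
 * frame present.
 *
 * Return: void
 */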
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned int tid, unsigned int *idx_end)
{
	unsigned int win_sz, win_sz_mask;
	unsigned int idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}

#ifdef HL_RX_AGGREGATION_HOLE_DETECTION

/**
 * ol_rx_reorder_detect_hole() - detect a hole in the rx reorder window
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * If the flush start index has moved past the next release index, the
 * intervening entries were never received; report the size of that gap
 * via ol_rx_aggregation_hole().
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	uint32_t win_sz_mask, next_rel_idx, hole_size;

	if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
		return;

	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	/* Return directly if block-ack is not enabled */
	if (win_sz_mask == 0)
		return;

	idx_start &= win_sz_mask;
	next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;

	if (idx_start != next_rel_idx) {
		hole_size = ((int)idx_start - (int)next_rel_idx) & win_sz_mask;

		ol_rx_aggregation_hole(hole_size);
	}
}

#else

/**
 * ol_rx_reorder_detect_hole() - detect a hole in the rx reorder window
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Stub used when HL_RX_AGGREGATION_HOLE_DETECTION is not enabled.
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	/* no-op */
}

#endif

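/**
 * ol_rx_reorder_peer_cleanup() - discard all buffered rx for a peer
 * @vdev: virtual device the peer belongs to
 * @peer: peer being deleted / cleaned up
 *
 * Flushes every extended TID's reorder array with htt_rx_flush_discard
 * and removes the peer from the host rx reorder timeout tracking.
 *
 * Return: void
 */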
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

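/**
 * ol_rx_addba_handler() - handle an rx ADDBA event from the target
 * @pdev: txrx physical device
 * @peer_id: which peer the ADDBA is for
 * @tid: which TID the block-ack session covers
 * @win_sz: negotiated block-ack window size (at most 64)
 * @start_seq_num: starting sequence number of the window
 * @failed: non-zero if the ADDBA negotiation failed
 *
 * On success, allocates a reorder array whose length is win_sz rounded
 * up to a power of two and resets the per-TID reorder bookkeeping.  If
 * the host manages ADDBA, the completion is also reported to the
 * control path.
 *
 * Return: void
 */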
void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
	uint8_t round_pwr2_win_sz;
	unsigned int array_size;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	if (pdev->cfg.host_addba) {
		ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
					  &peer->mac_addr.raw[0], tid, failed);
	}
	if (failed)
		return;

	peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
	rx_reorder = &peer->tids_rx_reorder[tid];

	TXRX_ASSERT2(win_sz <= 64);
	rx_reorder->win_sz = win_sz;
	round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
	array_size =
		round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
	rx_reorder->array = qdf_mem_malloc(array_size);
	TXRX_ASSERT1(rx_reorder->array);

	rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
	rx_reorder->num_mpdus = 0;

	peer->tids_next_rel_idx[tid] =
		OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
				       rx_reorder->win_sz_mask);
}

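/**
 * ol_rx_delba_handler() - handle an rx DELBA event from the target
 * @pdev: txrx physical device
 * @peer_id: which peer the DELBA is for
 * @tid: which TID's block-ack session was torn down
 *
 * Frees the dynamically allocated reorder array (if any) and returns the
 * TID to the default single-slot, non-aggregation configuration.
 *
 * Return: void
 */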
void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer == NULL)
		return;

	peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
	rx_reorder = &peer->tids_rx_reorder[tid];

	/* check that there really was a block ack agreement */
	TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
	/*
	 * Deallocate the old rx reorder array.
	 * The call to ol_rx_reorder_init below
	 * will reset rx_reorder->array to point to
	 * the single-element statically-allocated reorder array
	 * used for non block-ack cases.
	 */
	if (rx_reorder->array != &rx_reorder->base) {
		ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
			    __func__, tid);
		qdf_mem_free(rx_reorder->array);
	}

	/* set up the TID with default parameters (ARQ window size = 1) */
	ol_rx_reorder_init(rx_reorder, tid);
}

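/**
 * ol_rx_flush_handler() - handle an rx reorder flush from the target
 * @pdev: txrx physical device
 * @peer_id: which peer the flush applies to
 * @tid: which TID is being flushed
 * @idx_start: start of the flush range
 * @idx_end: end of the flush range (exclusive)
 * @action: whether to release or discard the flushed frames
 *
 * Fragments are handed to the defrag path; otherwise the range is
 * released or discarded via ol_rx_reorder_flush(), with hole detection
 * performed first for release flushes.
 *
 * Return: void
 */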
void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming the flush message is sent separately for
			 * fragments and for normal frames.
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}

	if (action == htt_rx_flush_release)
		ol_rx_reorder_detect_hole(peer, tid, idx_start);

	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

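/**
 * ol_rx_pn_ind_handler() - handle a PN (packet number) check indication
 * @pdev: txrx physical device
 * @peer_id: which peer the indication is for
 * @tid: which TID the indication covers
 * @seq_num_start: first reorder slot covered (inclusive)
 * @seq_num_end: last reorder slot covered (exclusive)
 * @pn_ie_cnt: number of entries in @pn_ie
 * @pn_ie: list of slots whose MPDUs failed the target's PN check
 *
 * MPDUs listed in @pn_ie are logged, reported via ol_rx_err() and freed;
 * the remaining buffered MPDUs in the range are chained together and
 * delivered to the rx handler.
 *
 * Return: void
 */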
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     int seq_num_start,
		     int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned int win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int seq_num, i = 0;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer send this packet to OCB interface
		 * using OCB self peer
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: Fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				static uint32_t last_pncheck_print_time;
				/* static, so zero-initialized by default */

				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					ol_txrx_warn(
					"Tgt PN check failed - TID %d, peer %pK "
					"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
					"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
					"   new seq num = %d\n",
					tid, peer,
					peer->mac_addr.raw[0],
					peer->mac_addr.raw[1],
					peer->mac_addr.raw[2],
					peer->mac_addr.raw[3],
					peer->mac_addr.raw[4],
					peer->mac_addr.raw[5], pn.pn128[1],
					pn.pn128[0],
					pn.pn128[0] & 0xffffffffffffULL,
					htt_rx_mpdu_desc_seq_num(htt_pdev,
								 rx_desc));
				} else {
					ol_txrx_dbg(
					"Tgt PN check failed - TID %d, peer %pK "
					"(%02x:%02x:%02x:%02x:%02x:%02x)\n"
					"   PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
					"   new seq num = %d\n",
					tid, peer,
					peer->mac_addr.raw[0],
					peer->mac_addr.raw[1],
					peer->mac_addr.raw[2],
					peer->mac_addr.raw[3],
					peer->mac_addr.raw[4],
					peer->mac_addr.raw[5], pn.pn128[1],
					pn.pn128[0],
					pn.pn128[0] & 0xffffffffffffULL,
					htt_rx_mpdu_desc_seq_num(htt_pdev,
								 rx_desc));
				}
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					msdu = next_msdu;
				} while (1);

			} else {
				if (head_msdu == NULL) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}

#if defined(ENABLE_RX_REORDER_TRACE)

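/*
 * The rx reorder trace is a circular log of (tid, reorder index, sequence
 * number, MPDU count) tuples, sized by TXRX_RX_REORDER_TRACE_SIZE_LOG2.
 * Unused entries keep the sentinel sequence number 0xffff so the display
 * routine can tell whether the log has wrapped.
 */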
A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   log       array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count   idx  tid   idx  num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d %3d  %4d  %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */