/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc */

#include <linux/ieee80211.h>    /* IEEE80211_SEQ_MAX */

/* external interfaces */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>    /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>      /* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

static char g_log2ceil[] = {
        1,                      /* 0 -> 1 */
        1,                      /* 1 -> 1 */
        2,                      /* 2 -> 2 */
        4, 4,                   /* 3-4 -> 4 */
        8, 8, 8, 8,             /* 5-8 -> 8 */
        16, 16, 16, 16, 16, 16, 16, 16, /* 9-16 -> 16 */
        32, 32, 32, 32, 32, 32, 32, 32,
        32, 32, 32, 32, 32, 32, 32, 32, /* 17-32 -> 32 */
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, /* 33-64 -> 64 */
};

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0  /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
        do {                                                            \
                if (tail_msdu) {                                        \
                        qdf_nbuf_set_next(tail_msdu,                    \
                                          rx_reorder_array_elem->head); \
                }                                                       \
        } while (0)

/* functions called by txrx components */

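/**
 * ol_rx_reorder_init() - Initialize the rx reorder state for a TID
 * @rx_reorder: rx reorder state for the TID, embedded in the peer object
 * @tid: traffic type the state applies to
 *
 * Set up the default (no block ack) state: a window size of 1, a reorder
 * "array" that points at the statically-allocated single base element,
 * and an empty defrag waitlist entry.
 */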
void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
        rx_reorder->win_sz = 1;
        rx_reorder->win_sz_mask = 0;
        rx_reorder->array = &rx_reorder->base;
        rx_reorder->base.head = rx_reorder->base.tail = NULL;
        rx_reorder->tid = tid;
        rx_reorder->defrag_timeout_ms = 0;

        rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
        rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}

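/**
 * ol_rx_reorder_seq_num_check() - Check a unicast sequence number against
 *                                 the last sequence number seen for the TID
 * @pdev: data physical device
 * @peer: peer that sent the MPDU
 * @tid: traffic type of the MPDU
 * @seq_num: 802.11 sequence number of the MPDU
 *
 * Treat the MPDU as a replay (late or duplicate frame) if its sequence
 * number falls in the half of the sequence space behind the last valid
 * sequence number recorded for this TID.
 *
 * Return: htt_rx_status_err_replay if the MPDU should be dropped,
 *         htt_rx_status_ok otherwise
 */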
static enum htt_rx_status
ol_rx_reorder_seq_num_check(
        struct ol_txrx_pdev_t *pdev,
        struct ol_txrx_peer_t *peer,
        unsigned int tid, unsigned int seq_num)
{
        unsigned int seq_num_delta;

        /*
         * Don't check the new seq_num against last_seq
         * if last_seq is not valid.
         */
        if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
                return htt_rx_status_ok;

        /*
         * For duplicate detection, it might be helpful to also check
         * whether the retry bit is set or not - a strict duplicate packet
         * should be the one with retry bit set.
         * However, since many implementations do not set the retry bit,
         * and since this same function is also used for filtering out
         * late-arriving frames (frames that arrive after their rx reorder
         * timeout has expired) which are not retries, don't bother checking
         * the retry bit for now.
         */
        /* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
        seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
                (IEEE80211_SEQ_MAX - 1);        /* account for wraparound */

        if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
                return htt_rx_status_err_replay;
                /* or maybe htt_rx_status_err_dup */
        }
        return htt_rx_status_ok;
}

/**
 * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
 *                         duplicate detection & out-of-order check for
 *                         unicast packets.
 * @pdev: Pointer to pdev maintained by OL
 * @peer: Pointer to peer structure maintained by OL
 * @tid: TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
 *
 * This function
 *      1) For Multicast Frames -- does duplicate detection
 *         A frame is considered duplicate & dropped if it has a sequence
 *         number which is received twice in succession and with the retry
 *         bit set in the second case.
 *         A frame which is older than the last sequence number received
 *         is not considered duplicate but out-of-order. This function does
 *         not perform an out-of-order check for multicast frames, which is
 *         in keeping with section 9.3.2.10 of the 802.11-2012 spec.
 *      2) For Unicast Frames -- does duplicate detection & out-of-order
 *         check only for non-aggregation tids.
 *
 * Return: htt_rx_status_err_replay if the packet needs to be dropped,
 *         htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    uint8_t tid,
                    void *rx_mpdu_desc)
{
        uint16_t pkt_tid = 0xffff;
        uint16_t seq_num = IEEE80211_SEQ_MAX;
        bool retry = 0;

        seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);

        /* For mcast packets, only do the dup-detection, not the re-order check */

        if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

                pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

                /* Invalid packet TID, expected only for HL */
                /* Pass the packet on */
                if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
                        return htt_rx_status_ok;

                retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

                /*
                 * At this point, we define frames to be duplicate if they
                 * arrive "ONLY" in succession with the same sequence number
                 * and the last one has the retry bit set. For an older frame,
                 * we consider that as an out-of-order frame, and hence do not
                 * perform the dup-detection or out-of-order check for
                 * multicast frames as per discussions & spec.
                 * Hence the "seq_num <= last_seq_num" check is not necessary.
                 */
                if (qdf_unlikely(retry &&
                        (seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
                        /* drop mcast */
                        TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
                        return htt_rx_status_err_replay;
                }

                /*
                 * This is a multicast packet likely to be passed on...
                 * Set the mcast last seq number here.
                 * This is fairly accurate since:
                 * a) f/w sends multicast as separate PPDU/HTT messages
                 * b) mcast packets are not aggregated & hence single
                 * c) result of b) is that the flush / release bit is always
                 *    set on mcast packets, so they are likely to be
                 *    released immediately
                 */
                peer->tids_mcast_last_seq[pkt_tid] = seq_num;
                return htt_rx_status_ok;
        } else
                return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
}

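/**
 * ol_rx_reorder_store() - Store an MPDU's MSDU list in the rx reorder array
 * @pdev: data physical device
 * @peer: peer that sent the MPDU
 * @tid: traffic type of the MPDU
 * @idx: reorder index derived from the MPDU's sequence number
 * @head_msdu: first MSDU netbuf of the MPDU
 * @tail_msdu: last MSDU netbuf of the MPDU
 *
 * Append the MSDU list to the reorder array slot for the MPDU's sequence
 * number (masked by the block ack window size), where it is held until
 * the slot is released or flushed.
 */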
void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx, qdf_nbuf_t head_msdu,
                    qdf_nbuf_t tail_msdu)
{
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

        idx &= peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
        } else {
                rx_reorder_array_elem->head = head_msdu;
                OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
        }
        rx_reorder_array_elem->tail = tail_msdu;
}

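/**
 * ol_rx_reorder_release() - Release in-order MPDUs from the rx reorder array
 * @vdev: virtual device the peer belongs to
 * @peer: peer whose rx reorder state is being released
 * @tid: traffic type of the MPDUs
 * @idx_start: first reorder slot to release
 * @idx_end: (exclusive) slot at which to stop releasing
 *
 * Chain together the MSDUs stored in slots [idx_start, idx_end), record
 * the sequence number of the first MPDU of the released batch in
 * tids_last_seq, and hand the NULL-terminated MSDU list to the peer's
 * rx_opt_proc callback.
 */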
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
                      struct ol_txrx_peer_t *peer,
                      unsigned int tid, unsigned int idx_start,
                      unsigned int idx_end)
{
        unsigned int idx;
        unsigned int win_sz, win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu;
        qdf_nbuf_t tail_msdu;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* may get reset below */
        peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

        head_msdu = rx_reorder_array_elem->head;
        tail_msdu = rx_reorder_array_elem->tail;
        rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        if (head_msdu)
                OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

        idx = (idx_start + 1);
        OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        while (idx != idx_end) {
                rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
                                                  rx_reorder_array_elem);
                        tail_msdu = rx_reorder_array_elem->tail;
                }
                rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
                        NULL;
                idx++;
                OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
        }
        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                /*
                 * This logic is not quite correct - the last_seq value should
                 * be the sequence number of the final MPDU released rather
                 * than the initial MPDU released.
                 * However, tracking the sequence number of the first MPDU in
                 * the released batch works well enough:
                 * For Peregrine and Rome, the last_seq is checked only for
                 * non-aggregate cases, where only one MPDU at a time is
                 * released.
                 * For Riva, Pronto, and Northstar, the last_seq is checked to
                 * filter out late-arriving rx frames, whose sequence number
                 * will be less than the first MPDU in this release batch.
                 */
                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
        /*
         * If the rx reorder timeout is handled by host SW rather than the
         * target's rx reorder logic, then stop the timer here.
         * (If there are remaining rx holes, then the timer will be
         * restarted.)
         */
        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

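/**
 * ol_rx_reorder_flush() - Flush MPDUs from the rx reorder array
 * @vdev: virtual device the peer belongs to
 * @peer: peer whose rx reorder state is being flushed
 * @tid: traffic type to flush
 * @idx_start: first reorder slot to flush
 * @idx_end: (exclusive) slot at which to stop flushing, or 0xffff to
 *           flush the entire array
 * @action: whether to deliver the flushed frames up the stack
 *          (htt_rx_flush_release) or free them (htt_rx_flush_discard)
 *
 * Remove the stored MPDUs from the specified portion of the reorder
 * array, remove the TID from the rx defrag waitlist, and either deliver
 * or free the removed frames depending on the flush action.
 */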
void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
                    struct ol_txrx_peer_t *peer,
                    unsigned int tid,
                    unsigned int idx_start,
                    unsigned int idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_pdev_t *pdev;
        unsigned int win_sz;
        uint8_t win_sz_mask;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;

        pdev = vdev->pdev;
        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        /* an idx_end value of 0xffff means to flush the entire array */
        if (idx_end == 0xffff) {
                idx_end = idx_start;
                /*
                 * The array is being flushed in entirety because the block
                 * ack window has been shifted to a new position that does not
                 * overlap with the old position.  (Or due to reception of a
                 * DELBA.)
                 * Thus, since the block ack window is essentially being reset,
                 * reset the "next release index".
                 */
                peer->tids_next_rel_idx[tid] =
                        OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
        } else {
                peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
        }

        idx_start &= win_sz_mask;
        idx_end &= win_sz_mask;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[idx_start];
                idx_start = (idx_start + 1);
                OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

                if (rx_reorder_array_elem->head) {
                        OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
                                                    1);
                        if (head_msdu == NULL) {
                                head_msdu = rx_reorder_array_elem->head;
                                tail_msdu = rx_reorder_array_elem->tail;
                                rx_reorder_array_elem->head = NULL;
                                rx_reorder_array_elem->tail = NULL;
                                continue;
                        }
                        qdf_nbuf_set_next(tail_msdu,
                                          rx_reorder_array_elem->head);
                        tail_msdu = rx_reorder_array_elem->tail;
                        rx_reorder_array_elem->head =
                                rx_reorder_array_elem->tail = NULL;
                }
        } while (idx_start != idx_end);

        ol_rx_defrag_waitlist_remove(peer, tid);

        if (head_msdu) {
                uint16_t seq_num;
                htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

                seq_num = htt_rx_mpdu_desc_seq_num(
                        htt_pdev,
                        htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
                peer->tids_last_seq[tid] = seq_num;
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                if (action == htt_rx_flush_release) {
                        peer->rx_opt_proc(vdev, peer, tid, head_msdu);
                } else {
                        do {
                                qdf_nbuf_t next;

                                next = qdf_nbuf_next(head_msdu);
                                htt_rx_desc_frame_free(pdev->htt_pdev,
                                                       head_msdu);
                                head_msdu = next;
                        } while (head_msdu);
                }
        }
        /*
         * If the rx reorder array is empty, then reset the last_seq value -
         * it is likely that a BAR or a sequence number shift caused the
         * sequence number to jump, so the old last_seq value is not relevant.
         */
        if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
                peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

        OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}

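/**
 * ol_rx_reorder_first_hole() - Find the end of the first run of frames
 *                              following the initial hole
 * @peer: peer whose rx reorder array is scanned
 * @tid: traffic type to scan
 * @idx_end: filled with the index of the first slot of the second hole
 *
 * Walk the reorder array from the start index, bypass the initial hole,
 * then bypass the present frames that follow it; *idx_end is set to the
 * (exclusive) slot where the next hole begins, i.e. how far a release
 * can proceed.
 */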
void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
                         unsigned int tid, unsigned int *idx_end)
{
        unsigned int win_sz, win_sz_mask;
        unsigned int idx_start = 0, tmp_idx = 0;

        win_sz = peer->tids_rx_reorder[tid].win_sz;
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

        OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
        tmp_idx++;
        OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        /* bypass the initial hole */
        while (tmp_idx != idx_start &&
               !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /* bypass the present frames following the initial hole */
        while (tmp_idx != idx_start &&
               peer->tids_rx_reorder[tid].array[tmp_idx].head) {
                tmp_idx++;
                OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
        }
        /*
         * idx_end is exclusive rather than inclusive.
         * In other words, it is the index of the first slot of the second
         * hole, rather than the index of the final present frame following
         * the first hole.
         */
        *idx_end = tmp_idx;
}

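/**
 * ol_rx_reorder_peer_cleanup() - Discard all rx reorder state for a peer
 * @vdev: virtual device the peer belongs to
 * @peer: peer being deleted or disassociated
 *
 * Flush and discard the rx reorder array of every extended TID for the
 * peer, and remove any host-based reorder timeouts for the peer.
 */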
void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
                           struct ol_txrx_peer_t *peer)
{
        int tid;

        for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
                ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
                                    htt_rx_flush_discard);
        }
        OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

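/**
 * ol_rx_addba_handler() - Handle an rx ADDBA event from the target
 * @pdev: data physical device
 * @peer_id: which peer the ADDBA applies to
 * @tid: which traffic type the ADDBA applies to
 * @win_sz: negotiated block ack window size (up to 64)
 * @start_seq_num: starting sequence number of the block ack window
 * @failed: whether the ADDBA negotiation failed
 *
 * If the ADDBA succeeded, allocate a reorder array whose length is the
 * window size rounded up to a power of two, and initialize the reorder
 * bookkeeping (window mask, next release index, last sequence number).
 */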
void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
        uint8_t round_pwr2_win_sz;
        unsigned int array_size;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        if (pdev->cfg.host_addba) {
                ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
                                          &peer->mac_addr.raw[0], tid, failed);
        }
        if (failed)
                return;

        peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
        rx_reorder = &peer->tids_rx_reorder[tid];

        TXRX_ASSERT2(win_sz <= 64);
        rx_reorder->win_sz = win_sz;
        round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
        array_size =
                round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
        rx_reorder->array = qdf_mem_malloc(array_size);
        TXRX_ASSERT1(rx_reorder->array);

        rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
        rx_reorder->num_mpdus = 0;

        peer->tids_next_rel_idx[tid] =
                OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
                                       rx_reorder->win_sz_mask);
}

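/**
 * ol_rx_delba_handler() - Handle an rx DELBA event from the target
 * @pdev: data physical device
 * @peer_id: which peer the DELBA applies to
 * @tid: which traffic type the DELBA applies to
 *
 * Tear down the block ack state for the TID: free the dynamically
 * allocated reorder array and reinitialize the TID with the default
 * single-element (window size 1) reorder state.
 */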
void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_t *rx_reorder;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer == NULL)
                return;

        peer->tids_next_rel_idx[tid] = 0xffff;  /* invalid value */
        rx_reorder = &peer->tids_rx_reorder[tid];

        /* check that there really was a block ack agreement */
        TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
        /*
         * Deallocate the old rx reorder array.
         * The call to ol_rx_reorder_init below
         * will reset rx_reorder->array to point to
         * the single-element statically-allocated reorder array
         * used for non block-ack cases.
         */
        if (rx_reorder->array != &rx_reorder->base) {
                ol_txrx_dbg("%s, delete reorder array, tid:%d\n",
                            __func__, tid);
                qdf_mem_free(rx_reorder->array);
        }

        /* set up the TID with default parameters (ARQ window size = 1) */
        ol_rx_reorder_init(rx_reorder, tid);
}

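/**
 * ol_rx_flush_handler() - Handle an rx flush message from the target
 * @pdev: data physical device
 * @peer_id: which peer the flush applies to
 * @tid: which traffic type the flush applies to
 * @idx_start: first reorder slot to flush
 * @idx_end: (exclusive) slot at which to stop flushing
 * @action: whether to release or discard the flushed frames
 *
 * Fragmented MPDUs stored at the flush index are handed to the rx defrag
 * module; everything else is passed to ol_rx_reorder_flush().  The
 * host-based reorder timeout, if used, is updated afterwards.
 */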
void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
                    uint16_t peer_id,
                    uint8_t tid,
                    uint16_t idx_start,
                    uint16_t idx_end, enum htt_rx_flush_action action)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        int idx;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer)
                vdev = peer->vdev;
        else
                return;

        OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

        idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
        if (rx_reorder_array_elem->head) {
                rx_desc =
                        htt_rx_msdu_desc_retrieve(htt_pdev,
                                                  rx_reorder_array_elem->head);
                if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
                        ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
                                                 idx_start);
                        /*
                         * Assuming a flush message is sent separately for
                         * frags and for normal frames.
                         */
                        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
                        return;
                }
        }
        ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
        /*
         * If the rx reorder timeout is handled by host SW, see if there are
         * remaining rx holes that require the timer to be restarted.
         */
        OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
        OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

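/**
 * ol_rx_pn_ind_handler() - Handle a PN check indication from the target
 * @pdev: data physical device
 * @peer_id: which peer the indication applies to
 * @tid: which traffic type the indication applies to
 * @seq_num_start: first reorder slot covered by the indication
 * @seq_num_end: (exclusive) end of the reorder slot range covered
 * @pn_ie_cnt: number of entries in the PN information element
 * @pn_ie: list of reorder slots holding MPDUs that failed the target's
 *         PN check
 *
 * Walk the indicated portion of the reorder array: MPDUs flagged in
 * @pn_ie are logged, reported via ol_rx_err(), and freed, while the
 * remaining MPDUs are chained together and delivered through the peer's
 * rx_opt_proc callback.
 */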
void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
                     uint16_t peer_id,
                     uint8_t tid,
                     int seq_num_start,
                     int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
        struct ol_txrx_vdev_t *vdev = NULL;
        void *rx_desc;
        struct ol_txrx_peer_t *peer;
        struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
        unsigned int win_sz_mask;
        qdf_nbuf_t head_msdu = NULL;
        qdf_nbuf_t tail_msdu = NULL;
        htt_pdev_handle htt_pdev = pdev->htt_pdev;
        int seq_num, i = 0;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);

        if (!peer) {
                /*
                 * If we can't find a peer, send this packet to the OCB
                 * interface using the OCB self peer.
                 */
                if (!ol_txrx_get_ocb_peer(pdev, &peer))
                        peer = NULL;
        }

        if (peer)
                vdev = peer->vdev;
        else
                return;

        qdf_atomic_set(&peer->fw_pn_check, 1);
        /* TODO: Fragmentation case */
        win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
        seq_num_start &= win_sz_mask;
        seq_num_end &= win_sz_mask;
        seq_num = seq_num_start;

        do {
                rx_reorder_array_elem =
                        &peer->tids_rx_reorder[tid].array[seq_num];

                if (rx_reorder_array_elem->head) {
                        if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
                                qdf_nbuf_t msdu, next_msdu, mpdu_head,
                                           mpdu_tail;
                                static uint32_t last_pncheck_print_time;
                                /* no need to initialize - C zeroes statics */

                                uint32_t current_time_ms;
                                union htt_rx_pn_t pn = { 0 };
                                int index, pn_len;

                                mpdu_head = msdu = rx_reorder_array_elem->head;
                                mpdu_tail = rx_reorder_array_elem->tail;

                                pn_ie_cnt--;
                                i++;
                                rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
                                                                    msdu);
                                index = htt_rx_msdu_is_wlan_mcast(
                                        pdev->htt_pdev, rx_desc)
                                        ? txrx_sec_mcast
                                        : txrx_sec_ucast;
                                pn_len = pdev->rx_pn[peer->security[index].
                                                     sec_type].len;
                                htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
                                                    pn_len);

                                current_time_ms = qdf_system_ticks_to_msecs(
                                        qdf_system_ticks());
                                if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
                                    (current_time_ms -
                                     last_pncheck_print_time)) {
                                        last_pncheck_print_time =
                                                current_time_ms;
                                        ol_txrx_warn(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                " new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5], pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(htt_pdev,
                                                                         rx_desc));
                                } else {
                                        ol_txrx_dbg(
                                                "Tgt PN check failed - TID %d, peer %pK "
                                                "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
                                                " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
                                                " new seq num = %d\n",
                                                tid, peer,
                                                peer->mac_addr.raw[0],
                                                peer->mac_addr.raw[1],
                                                peer->mac_addr.raw[2],
                                                peer->mac_addr.raw[3],
                                                peer->mac_addr.raw[4],
                                                peer->mac_addr.raw[5], pn.pn128[1],
                                                pn.pn128[0],
                                                pn.pn128[0] & 0xffffffffffffULL,
                                                htt_rx_mpdu_desc_seq_num(htt_pdev,
                                                                         rx_desc));
                                }
                                ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
                                          peer->mac_addr.raw, tid,
                                          htt_rx_mpdu_desc_tsf32(htt_pdev,
                                                                 rx_desc),
                                          OL_RX_ERR_PN, mpdu_head, NULL, 0);

                                /* free all MSDUs within this MPDU */
                                do {
                                        next_msdu = qdf_nbuf_next(msdu);
                                        htt_rx_desc_frame_free(htt_pdev, msdu);
                                        if (msdu == mpdu_tail)
                                                break;
                                        msdu = next_msdu;
                                } while (1);

                        } else {
                                if (head_msdu == NULL) {
                                        head_msdu = rx_reorder_array_elem->head;
                                        tail_msdu = rx_reorder_array_elem->tail;
                                } else {
                                        qdf_nbuf_set_next(
                                                tail_msdu,
                                                rx_reorder_array_elem->head);
                                        tail_msdu = rx_reorder_array_elem->tail;
                                }
                        }
                        rx_reorder_array_elem->head = NULL;
                        rx_reorder_array_elem->tail = NULL;
                }
                seq_num = (seq_num + 1) & win_sz_mask;
        } while (seq_num != seq_num_end);

        if (head_msdu) {
                /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
                qdf_nbuf_set_next(tail_msdu, NULL);
                peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        }
}

#if defined(ENABLE_RX_REORDER_TRACE)

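/**
 * ol_rx_reorder_trace_attach() - Allocate the rx reorder trace log
 * @pdev: data physical device
 *
 * Allocate a circular buffer of 2^TXRX_RX_REORDER_TRACE_SIZE_LOG2 trace
 * records and mark every entry invalid (seq_num = 0xffff).
 *
 * Return: A_OK on success, A_NO_MEMORY if the allocation fails
 */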
A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
        int num_elems;

        num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
        pdev->rx_reorder_trace.idx = 0;
        pdev->rx_reorder_trace.cnt = 0;
        pdev->rx_reorder_trace.mask = num_elems - 1;
        pdev->rx_reorder_trace.data = qdf_mem_malloc(
                sizeof(*pdev->rx_reorder_trace.data) * num_elems);
        if (!pdev->rx_reorder_trace.data)
                return A_NO_MEMORY;

        while (--num_elems >= 0)
                pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

        return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
        qdf_mem_free(pdev->rx_reorder_trace.data);
}

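/**
 * ol_rx_reorder_trace_add() - Record one rx reorder event in the trace log
 * @pdev: data physical device
 * @tid: traffic type of the MPDU(s)
 * @reorder_idx: reorder array index of the event
 * @seq_num: sequence number of the MPDU (or an encoded error value)
 * @num_mpdus: number of MPDUs involved in the event
 *
 * Store the event in the next slot of the circular trace buffer,
 * wrapping the write index with the buffer mask.
 */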
void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
                        uint8_t tid,
                        uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
        uint32_t idx = pdev->rx_reorder_trace.idx;

        pdev->rx_reorder_trace.data[idx].tid = tid;
        pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
        pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
        pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
        pdev->rx_reorder_trace.cnt++;
        idx++;
        pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

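/**
 * ol_rx_reorder_trace_display() - Print the contents of the rx reorder trace
 * @pdev: data physical device
 * @just_once: if set, print only on the first call
 * @limit: if positive, print at most this many of the latest entries
 *
 * Dump the circular trace buffer via QDF_TRACE, starting from the oldest
 * valid entry (or later, if @limit truncates the output).
 */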
void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
        static int print_count;
        uint32_t i, start, end;
        uint64_t cnt;
        int elems;

        if (print_count != 0 && just_once)
                return;

        print_count++;

        end = pdev->rx_reorder_trace.idx;
        if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
                /* trace log has not yet wrapped around - start at the top */
                start = 0;
                cnt = 0;
        } else {
                start = end;
                cnt = pdev->rx_reorder_trace.cnt -
                        (pdev->rx_reorder_trace.mask + 1);
        }
        elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
        if (limit > 0 && elems > limit) {
                int delta;

                delta = elems - limit;
                start += delta;
                start &= pdev->rx_reorder_trace.mask;
                cnt += delta;
        }

        i = start;
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  " log array seq");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  " count idx tid idx num (LSBs)");
        do {
                uint16_t seq_num, reorder_idx;

                seq_num = pdev->rx_reorder_trace.data[i].seq_num;
                reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
                if (seq_num < (1 << 14)) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  " %6lld %4d %3d %4d %4d (%d)",
                                  cnt, i, pdev->rx_reorder_trace.data[i].tid,
                                  reorder_idx, seq_num, seq_num & 63);
                } else {
                        int err = TXRX_SEQ_NUM_ERR(seq_num);

                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                                  " %6lld %4d err %d (%d MPDUs)",
                                  cnt, i, err,
                                  pdev->rx_reorder_trace.data[i].num_mpdus);
                }
                cnt++;
                i++;
                i &= pdev->rx_reorder_trace.mask;
        } while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */