Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Nirav Shah | eb017be | 2018-02-15 11:20:58 +0530 | [diff] [blame^] | 2 | * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
| 28 | /*=== header file includes ===*/ |
| 29 | /* generic utilities */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 30 | #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */ |
Anurag Chouhan | 754fbd8 | 2016-02-19 17:00:08 +0530 | [diff] [blame] | 31 | #include <qdf_timer.h> |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 32 | #include <qdf_time.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 33 | |
| 34 | /* datapath internal interfaces */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 35 | #include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */ |
| 36 | #include <ol_rx_reorder.h> /* ol_rx_reorder_flush, etc. */ |
Nirav Shah | eb017be | 2018-02-15 11:20:58 +0530 | [diff] [blame^] | 37 | #include <ol_rx_reorder_timeout.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 38 | |
| 39 | #ifdef QCA_SUPPORT_OL_RX_REORDER_TIMEOUT |
| 40 | |
Yun Park | 42e0bef | 2017-04-05 22:36:33 -0700 | [diff] [blame] | 41 | void ol_rx_reorder_timeout_remove(struct ol_txrx_peer_t *peer, unsigned int tid) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 42 | { |
| 43 | struct ol_txrx_pdev_t *pdev; |
| 44 | struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac; |
| 45 | struct ol_rx_reorder_timeout_list_elem_t *list_elem; |
| 46 | int ac; |
| 47 | |
| 48 | pdev = peer->vdev->pdev; |
| 49 | ac = TXRX_TID_TO_WMM_AC(tid); |
| 50 | rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac]; |
| 51 | list_elem = &peer->tids_rx_reorder[tid].timeout; |
| 52 | if (!list_elem->active) { |
| 53 | /* this element has already been removed */ |
| 54 | return; |
| 55 | } |
| 56 | list_elem->active = 0; |
| 57 | TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list, list_elem, |
| 58 | reorder_timeout_list_elem); |
| 59 | } |
| 60 | |
/*
 * ol_rx_reorder_timeout_start() - arm the per-access-category OS timer
 * to fire when the earliest-expiring element of the virtual timer list
 * is due.
 *
 * Callers must guarantee the virtual timer list is non-empty: the head
 * element is dereferenced without a NULL check.
 * NOTE(review): the unsigned subtraction assumes the head element's
 * timestamp_ms >= time_now_ms; both call sites uphold this (add inserts
 * a future timestamp, the timeout handler stops at the first unexpired
 * element), otherwise duration_ms would wrap to a huge value.
 */
static void
ol_rx_reorder_timeout_start(struct ol_tx_reorder_cat_timeout_t
			    *rx_reorder_timeout_ac, uint32_t time_now_ms)
{
	uint32_t duration_ms;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem;

	list_elem = TAILQ_FIRST(&rx_reorder_timeout_ac->virtual_timer_list);

	/* time remaining until the head element expires */
	duration_ms = list_elem->timestamp_ms - time_now_ms;
	qdf_timer_start(&rx_reorder_timeout_ac->timer, duration_ms);
}
| 73 | |
| 74 | static inline void |
| 75 | ol_rx_reorder_timeout_add(struct ol_txrx_peer_t *peer, uint8_t tid) |
| 76 | { |
| 77 | uint32_t time_now_ms; |
| 78 | struct ol_txrx_pdev_t *pdev; |
| 79 | struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac; |
| 80 | struct ol_rx_reorder_timeout_list_elem_t *list_elem; |
| 81 | int ac; |
| 82 | int start; |
| 83 | |
| 84 | pdev = peer->vdev->pdev; |
| 85 | ac = TXRX_TID_TO_WMM_AC(tid); |
| 86 | rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac]; |
| 87 | list_elem = &peer->tids_rx_reorder[tid].timeout; |
| 88 | |
| 89 | list_elem->active = 1; |
| 90 | list_elem->peer = peer; |
| 91 | list_elem->tid = tid; |
| 92 | |
| 93 | /* set the expiration timestamp */ |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 94 | time_now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 95 | list_elem->timestamp_ms = |
| 96 | time_now_ms + rx_reorder_timeout_ac->duration_ms; |
| 97 | |
| 98 | /* add to the queue */ |
| 99 | start = TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list); |
| 100 | TAILQ_INSERT_TAIL(&rx_reorder_timeout_ac->virtual_timer_list, |
| 101 | list_elem, reorder_timeout_list_elem); |
| 102 | if (start) |
| 103 | ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms); |
| 104 | } |
| 105 | |
| 106 | void ol_rx_reorder_timeout_update(struct ol_txrx_peer_t *peer, uint8_t tid) |
| 107 | { |
| 108 | if (!peer) |
| 109 | return; |
| 110 | |
| 111 | /* |
| 112 | * If there are no holes, i.e. no queued frames, |
| 113 | * then timeout doesn't apply. |
| 114 | */ |
| 115 | if (peer->tids_rx_reorder[tid].num_mpdus == 0) |
| 116 | return; |
| 117 | |
| 118 | /* |
| 119 | * If the virtual timer for this peer-TID is already running, |
| 120 | * then leave it. |
| 121 | */ |
| 122 | if (peer->tids_rx_reorder[tid].timeout.active) |
| 123 | return; |
| 124 | |
| 125 | ol_rx_reorder_timeout_add(peer, tid); |
| 126 | } |
| 127 | |
/*
 * ol_rx_reorder_timeout() - per-access-category timer callback.
 *
 * Walks the category's virtual timer list (kept in expiration order,
 * since ol_rx_reorder_timeout_add appends now + a fixed per-AC
 * duration), releases every expired peer-TID's held frames up to the
 * first hole, then re-arms the OS timer for the earliest unexpired
 * element, if any.  Runs under pdev->rx.mutex to serialize against
 * the rx path's reorder state.
 */
static void ol_rx_reorder_timeout(void *arg)
{
	struct ol_txrx_pdev_t *pdev;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem, *tmp;
	uint32_t time_now_ms;
	struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;

	rx_reorder_timeout_ac = (struct ol_tx_reorder_cat_timeout_t *)arg;
	time_now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	pdev = rx_reorder_timeout_ac->pdev;
	qdf_spin_lock(&pdev->rx.mutex);
	/* TODO: conditionally take mutex lock during regular rx */
	/* safe variant: the current element is removed inside the loop */
	TAILQ_FOREACH_SAFE(list_elem,
			   &rx_reorder_timeout_ac->virtual_timer_list,
			   reorder_timeout_list_elem, tmp) {
		unsigned int idx_start, idx_end;
		struct ol_txrx_peer_t *peer;

		/*
		 * List is in expiration order, so the first unexpired
		 * element ends the scan.
		 */
		if (list_elem->timestamp_ms > time_now_ms)
			break;	/* time has not expired yet for this element */

		list_elem->active = 0;
		/* remove the expired element from the list */
		TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list,
			     list_elem, reorder_timeout_list_elem);

		peer = list_elem->peer;

		/*
		 * Release everything from the release pointer up to (but
		 * not including) the first missing MPDU.
		 */
		idx_start = 0xffff;	/* start from next_rel_idx */
		ol_rx_reorder_first_hole(peer, list_elem->tid, &idx_end);
		ol_rx_reorder_flush(peer->vdev,
				    peer,
				    list_elem->tid,
				    idx_start, idx_end, htt_rx_flush_release);
	}
	/* restart the timer if unexpired elements are left in the list */
	if (!TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list))
		ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);

	qdf_spin_unlock(&pdev->rx.mutex);
}
| 170 | |
| 171 | void ol_rx_reorder_timeout_init(struct ol_txrx_pdev_t *pdev) |
| 172 | { |
| 173 | int i; |
| 174 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 175 | for (i = 0; i < QDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 176 | i++) { |
| 177 | struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac; |
Yun Park | 42e0bef | 2017-04-05 22:36:33 -0700 | [diff] [blame] | 178 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 179 | rx_reorder_timeout_ac = |
| 180 | &pdev->rx.reorder_timeout.access_cats[i]; |
| 181 | /* init the per-AC timers */ |
Anurag Chouhan | 754fbd8 | 2016-02-19 17:00:08 +0530 | [diff] [blame] | 182 | qdf_timer_init(pdev->osdev, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 183 | &rx_reorder_timeout_ac->timer, |
| 184 | ol_rx_reorder_timeout, |
Mohit Khanna | c6f0398 | 2016-05-15 20:37:55 -0700 | [diff] [blame] | 185 | rx_reorder_timeout_ac, |
| 186 | QDF_TIMER_TYPE_SW); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 187 | /* init the virtual timer list */ |
| 188 | TAILQ_INIT(&rx_reorder_timeout_ac->virtual_timer_list); |
| 189 | rx_reorder_timeout_ac->pdev = pdev; |
| 190 | } |
| 191 | pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VO].duration_ms = 40; |
| 192 | pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VI].duration_ms = 100; |
| 193 | pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BE].duration_ms = 100; |
| 194 | pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BK].duration_ms = 100; |
| 195 | } |
| 196 | |
| 197 | void ol_rx_reorder_timeout_peer_cleanup(struct ol_txrx_peer_t *peer) |
| 198 | { |
| 199 | int tid; |
| 200 | |
| 201 | for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) { |
| 202 | if (peer->tids_rx_reorder[tid].timeout.active) |
| 203 | ol_rx_reorder_timeout_remove(peer, tid); |
| 204 | } |
| 205 | } |
| 206 | |
| 207 | void ol_rx_reorder_timeout_cleanup(struct ol_txrx_pdev_t *pdev) |
| 208 | { |
| 209 | int i; |
| 210 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 211 | for (i = 0; i < QDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 212 | i++) { |
| 213 | struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac; |
Yun Park | 42e0bef | 2017-04-05 22:36:33 -0700 | [diff] [blame] | 214 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 215 | rx_reorder_timeout_ac = |
| 216 | &pdev->rx.reorder_timeout.access_cats[i]; |
Anurag Chouhan | 754fbd8 | 2016-02-19 17:00:08 +0530 | [diff] [blame] | 217 | qdf_timer_stop(&rx_reorder_timeout_ac->timer); |
| 218 | qdf_timer_free(&rx_reorder_timeout_ac->timer); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 219 | } |
| 220 | } |
| 221 | |
| 222 | #endif /* QCA_SUPPORT_OL_RX_REORDER_TIMEOUT */ |