/*
 * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <ol_cfg.h>		/* ol_cfg_addba_retry */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */
#include <ol_ctrl_txrx_api.h>	/* ol_ctrl_addba_req */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1, etc. */
#include <ol_tx_desc.h>		/* ol_tx_desc, ol_tx_desc_frame_list_free */
#include <ol_tx.h>		/* ol_tx_vdev_ll_pause_queue_send */
#include <ol_tx_queue.h>
#include <ol_txrx_dbg.h>	/* ENABLE_TX_QUEUE_LOG */
#include <qdf_types.h>		/* bool */

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)

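/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @vdev: vdev handle
 * @reason: pause reason bit(s) to record in ll_pause.paused_reason
 *
 * Take the ll_pause mutex, record the pause reason, and mark the vdev
 * tx queue as paused so that subsequent frames are queued instead of
 * being sent.
 *
 * Return: none
 */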
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	/* TO DO: log the queue pause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	vdev->ll_pause.paused_reason |= reason;
	vdev->ll_pause.q_pause_cnt++;
	vdev->ll_pause.is_q_paused = true;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_VDEV_PAUSE, NULL, 0));
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}

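/**
 * ol_txrx_vdev_unpause() - unpause vdev network queues
 * @vdev: vdev handle
 * @reason: pause reason bit(s) to clear
 *
 * Clear the given pause reason; when no pause reasons remain, mark the
 * tx queue unpaused and send the frames that were queued while paused.
 *
 * Return: none
 */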
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	/* TO DO: log the queue unpause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason & reason) {
		vdev->ll_pause.paused_reason &= ~reason;
		if (!vdev->ll_pause.paused_reason) {
			vdev->ll_pause.is_q_paused = false;
			vdev->ll_pause.q_unpause_cnt++;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			ol_tx_vdev_ll_pause_queue_send(vdev);
		} else {
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} else {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_VDEV_UNPAUSE, NULL, 0));
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}

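/**
 * ol_txrx_vdev_flush() - drop all tx frames queued on a paused vdev
 * @vdev: vdev handle
 *
 * Stop the pause timer, then unmap and free every frame on the
 * ll_pause tx queue before resetting the queue to empty.
 *
 * Return: none
 */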
void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	qdf_timer_stop(&vdev->ll_pause.timer);
	vdev->ll_pause.is_q_timer_on = false;
	while (vdev->ll_pause.txq.head) {
		qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);

		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
		qdf_nbuf_unmap(vdev->pdev->osdev, vdev->ll_pause.txq.head,
			       QDF_DMA_TO_DEVICE);
		qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
		vdev->ll_pause.txq.head = next;
	}
	vdev->ll_pause.txq.tail = NULL;
	vdev->ll_pause.txq.depth = 0;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * ol_txrx_map_to_netif_reason_type() - map a tx queue pause reason to
 *	the corresponding netif_reason_type
 * @reason: OL_TXQ_PAUSE_REASON_* tx queue pause reason
 *
 * Return: netif_reason_type, or WLAN_REASON_TYPE_MAX if unsupported
 */
enum netif_reason_type
ol_txrx_map_to_netif_reason_type(uint32_t reason)
{
	switch (reason) {
	case OL_TXQ_PAUSE_REASON_FW:
		return WLAN_FW_PAUSE;
	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
		return WLAN_PEER_UNAUTHORISED;
	case OL_TXQ_PAUSE_REASON_TX_ABORT:
		return WLAN_TX_ABORT;
	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
		return WLAN_VDEV_STOP;
	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
		return WLAN_THERMAL_MITIGATION;
	default:
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: reason not supported %d\n",
			   __func__, reason);
		return WLAN_REASON_TYPE_MAX;
	}
}

/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @vdev: vdev handle
 * @reason: tx queue pause reason
 *
 * Return: none
 */
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	enum netif_reason_type netif_reason;

	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pdev\n", __func__);
		return;
	}

	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
	if (netif_reason == WLAN_REASON_TYPE_MAX)
		return;

	pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
}

/**
 * ol_txrx_vdev_unpause() - unpause vdev network queues
 * @vdev: vdev handle
 * @reason: tx queue pause reason
 *
 * Return: none
 */
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	enum netif_reason_type netif_reason;

	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pdev\n", __func__);
		return;
	}

	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
	if (netif_reason == WLAN_REASON_TYPE_MAX)
		return;

	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
		       netif_reason);
}

/**
 * ol_txrx_pdev_pause() - pause network queues for each vdev
 * @pdev: pdev handle
 * @reason: tx queue pause reason
 *
 * Return: none
 */
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	struct ol_txrx_vdev_t *vdev = NULL, *tmp;

	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
		ol_txrx_vdev_pause(vdev, reason);
	}
}

/**
 * ol_txrx_pdev_unpause() - unpause network queues for each vdev
 * @pdev: pdev handle
 * @reason: tx queue pause reason
 *
 * Return: none
 */
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	struct ol_txrx_vdev_t *vdev = NULL, *tmp;

	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
		ol_txrx_vdev_unpause(vdev, reason);
	}
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

/*--- LL tx throttle queue code --------------------------------------------*/
#if defined(QCA_SUPPORT_TX_THROTTLE)
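/**
 * ol_tx_pdev_is_target_empty() - check if the target has drained its
 *	tx queues
 *
 * Currently a stub that always reports empty (see the TM TODO).
 *
 * Return: 1 if the target tx queues are empty
 */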
uint8_t ol_tx_pdev_is_target_empty(void)
{
	/* TM TODO */
	return 1;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
}
#else
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
#endif

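/**
 * ol_tx_pdev_throttle_phase_timer() - throttle phase timer handler
 * @context: pdev handle cast to void *
 *
 * Advance the throttle duty cycle to its next phase: pause the tx
 * queues when entering the OFF phase, unpause them when entering the
 * ON phase, and restart the phase timer with the duration configured
 * for the current throttle level (unless throttling is at level 0).
 *
 * Return: none
 */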
void ol_tx_pdev_throttle_phase_timer(void *context)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
	int ms;
	enum throttle_level cur_level;
	enum throttle_phase cur_phase;

	/* update the phase */
	pdev->tx_throttle.current_throttle_phase++;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
		pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
		/* Traffic is stopped */
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "throttle phase --> OFF\n");
		ol_txrx_thermal_pause(pdev);
		cur_level = pdev->tx_throttle.current_throttle_level;
		cur_phase = pdev->tx_throttle.current_throttle_phase;
		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
		if (pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "start timer %d ms\n", ms);
			qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
		}
	} else {
		/* Traffic can go */
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "throttle phase --> ON\n");
		ol_txrx_thermal_unpause(pdev);
		cur_level = pdev->tx_throttle.current_throttle_level;
		cur_phase = pdev->tx_throttle.current_throttle_phase;
		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
		if (pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "start timer %d ms\n", ms);
			qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
		}
	}
}

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
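/**
 * ol_tx_pdev_throttle_tx_timer() - throttle tx timer handler
 * @context: pdev handle cast to void *
 *
 * Send the frames queued on the paused ll tx queues of every vdev
 * belonging to this pdev.
 *
 * Return: none
 */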
void ol_tx_pdev_throttle_tx_timer(void *context)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;

	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
#endif

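/**
 * ol_tx_throttle_set_level() - set the current tx throttle level
 * @pdev: pdev handle
 * @level: new throttle level, 0 (no throttling) to THROTTLE_LEVEL_MAX - 1
 *
 * Reset the throttle phase to OFF, unpause the tx queues, and restart
 * the phase timer with the OFF duration of the new level; level
 * THROTTLE_LEVEL_0 leaves the timer stopped, i.e. throttling disabled.
 *
 * Return: none
 */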
void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
{
	int ms = 0;

	if (level >= THROTTLE_LEVEL_MAX) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s invalid throttle level set %d, ignoring\n",
			   __func__, level);
		return;
	}

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);

	/* Set the current throttle level */
	pdev->tx_throttle.current_throttle_level = (enum throttle_level)level;

	/* Reset the phase */
	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
	ol_txrx_thermal_unpause(pdev);

	/* Start with the new time */
	ms = pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_OFF];

	qdf_timer_stop(&pdev->tx_throttle.phase_timer);

	if (level != THROTTLE_LEVEL_0)
		qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
}

/*
 * This table stores the shift applied to the duty period at each level
 * to compute the "on" time. For example, the "on" time for level 2 with
 * a duty period of 100 ms is:
 *   "on" time = duty_period_ms >> g_throttle_duty_cycle_table[2]
 *   "on" time = 100 ms >> 2 = 25 ms
 */
static uint8_t g_throttle_duty_cycle_table[THROTTLE_LEVEL_MAX] = { 0, 1, 2, 4 };

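/**
 * ol_tx_throttle_init_period() - initialize the per-level throttle times
 * @pdev: pdev handle
 * @period: duty cycle period in ms shared by the ON and OFF phases
 *
 * For each throttle level, derive the ON time by right-shifting the
 * period per the duty cycle table, and assign the remainder of the
 * period to the OFF time.
 *
 * Return: none
 */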
void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period)
{
	int i;

	/* Set the throttle period */
	pdev->tx_throttle.throttle_period_ms = period;

	TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level OFF ON\n");
	for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
			pdev->tx_throttle.throttle_period_ms >>
			g_throttle_duty_cycle_table[i];
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
			pdev->tx_throttle.throttle_period_ms -
			pdev->tx_throttle.throttle_time_ms[
				i][THROTTLE_PHASE_ON];
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d %d %d\n", i,
			   pdev->tx_throttle.
			   throttle_time_ms[i][THROTTLE_PHASE_OFF],
			   pdev->tx_throttle.
			   throttle_time_ms[i][THROTTLE_PHASE_ON]);
	}
}

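/**
 * ol_tx_throttle_init() - initialize the tx throttle context of a pdev
 * @pdev: pdev handle
 *
 * Set the initial throttle level and phase, create the throttle mutex,
 * compute the per-level ON/OFF times from the configured period, and
 * initialize the phase timer (and, with legacy flow control, the tx
 * timer).
 *
 * Return: none
 */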
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
{
	uint32_t throttle_period;

	pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
	qdf_spinlock_create(&pdev->tx_throttle.mutex);

	throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);

	ol_tx_throttle_init_period(pdev, throttle_period);

	qdf_timer_init(pdev->osdev,
		       &pdev->tx_throttle.phase_timer,
		       ol_tx_pdev_throttle_phase_timer, pdev,
		       QDF_TIMER_TYPE_SW);

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
	qdf_timer_init(pdev->osdev,
		       &pdev->tx_throttle.tx_timer,
		       ol_tx_pdev_throttle_tx_timer, pdev,
		       QDF_TIMER_TYPE_SW);
#endif

	pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
}
#endif /* QCA_SUPPORT_TX_THROTTLE */
/*--- End of LL tx throttle queue code --------------------------------------*/