/*
 * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <ol_cfg.h>             /* ol_cfg_addba_retry */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_addba_req */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1, etc. */
#include <ol_tx_desc.h>         /* ol_tx_desc, ol_tx_desc_frame_list_free */
#include <ol_tx.h>              /* ol_tx_vdev_ll_pause_queue_send */
#include <ol_tx_queue.h>
#include <ol_txrx_dbg.h>        /* ENABLE_TX_QUEUE_LOG */
#include <qdf_types.h>          /* bool */

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)

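/**
 * ol_txrx_vdev_pause() - suspend the vdev's tx queue and record why
 * @vdev: vdev handle
 * @reason: pause reason bit to add to ll_pause.paused_reason
 *
 * Return: none
 */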
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	/* TO DO: log the queue pause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	vdev->ll_pause.paused_reason |= reason;
	vdev->ll_pause.q_pause_cnt++;
	vdev->ll_pause.is_q_paused = true;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_VDEV_PAUSE, NULL, 0));
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}

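/**
 * ol_txrx_vdev_unpause() - clear a pause reason and resume tx if none remain
 * @vdev: vdev handle
 * @reason: pause reason bit to clear from ll_pause.paused_reason
 *
 * paused_reason is a bitmask, so the queue is only restarted once every
 * reason that paused it has been cleared.
 *
 * Return: none
 */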
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	/* TO DO: log the queue unpause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason & reason) {
		vdev->ll_pause.paused_reason &= ~reason;
		if (!vdev->ll_pause.paused_reason) {
			vdev->ll_pause.is_q_paused = false;
			vdev->ll_pause.q_unpause_cnt++;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			ol_tx_vdev_ll_pause_queue_send(vdev);
		} else {
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} else {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_VDEV_UNPAUSE, NULL, 0));
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}

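/**
 * ol_txrx_vdev_flush() - stop the pause timer and drop every queued tx frame
 * @vdev: vdev handle
 *
 * Each frame in the pause queue is unmapped and freed with an error
 * status before the queue is reset to empty.
 *
 * Return: none
 */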
void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	qdf_timer_stop(&vdev->ll_pause.timer);
	vdev->ll_pause.is_q_timer_on = false;
	while (vdev->ll_pause.txq.head) {
		qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);

		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
		qdf_nbuf_unmap(vdev->pdev->osdev, vdev->ll_pause.txq.head,
			       QDF_DMA_TO_DEVICE);
		qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
		vdev->ll_pause.txq.head = next;
	}
	vdev->ll_pause.txq.tail = NULL;
	vdev->ll_pause.txq.depth = 0;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
 * @reason: OL_TXQ_PAUSE_REASON_* value to map
 *
 * Return: the matching netif_reason_type, or WLAN_REASON_TYPE_MAX if
 * the reason is not supported
 */
enum netif_reason_type
ol_txrx_map_to_netif_reason_type(uint32_t reason)
{
	switch (reason) {
	case OL_TXQ_PAUSE_REASON_FW:
		return WLAN_FW_PAUSE;
	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
		return WLAN_PEER_UNAUTHORISED;
	case OL_TXQ_PAUSE_REASON_TX_ABORT:
		return WLAN_TX_ABORT;
	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
		return WLAN_VDEV_STOP;
	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
		return WLAN_THERMAL_MITIGATION;
	default:
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: reason not supported %d\n",
			   __func__, reason);
		return WLAN_REASON_TYPE_MAX;
	}
}

/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @vdev: vdev handle
 * @reason: OL_TXQ_PAUSE_REASON_* value for the pause
 *
 * Return: none
 */
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	enum netif_reason_type netif_reason;

	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pdev\n", __func__);
		return;
	}

	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
	if (netif_reason == WLAN_REASON_TYPE_MAX)
		return;

	pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
}

/**
 * ol_txrx_vdev_unpause() - unpause vdev network queues
 * @vdev: vdev handle
 * @reason: OL_TXQ_PAUSE_REASON_* value for the unpause
 *
 * Return: none
 */
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	enum netif_reason_type netif_reason;

	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pdev\n", __func__);
		return;
	}

	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
	if (netif_reason == WLAN_REASON_TYPE_MAX)
		return;

	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
		       netif_reason);
}

/**
 * ol_txrx_pdev_pause() - pause network queues for each vdev
 * @pdev: pdev handle
 * @reason: OL_TXQ_PAUSE_REASON_* value for the pause
 *
 * Return: none
 */
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	struct ol_txrx_vdev_t *vdev = NULL, *tmp;

	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
		ol_txrx_vdev_pause(vdev, reason);
	}
}

/**
 * ol_txrx_pdev_unpause() - unpause network queues for each vdev
 * @pdev: pdev handle
 * @reason: OL_TXQ_PAUSE_REASON_* value for the unpause
 *
 * Return: none
 */
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	struct ol_txrx_vdev_t *vdev = NULL, *tmp;

	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
		ol_txrx_vdev_unpause(vdev, reason);
	}
}
#endif

/*--- LL tx throttle queue code --------------------------------------------*/
#if defined(QCA_SUPPORT_TX_THROTTLE)
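/**
 * ol_tx_pdev_is_target_empty() - check whether the target's tx queue is empty
 *
 * Currently a stub that always reports empty (see the TM TODO below).
 *
 * Return: 1 if the target has no pending tx frames
 */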
uint8_t ol_tx_pdev_is_target_empty(void)
{
	/* TM TODO */
	return 1;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
}
#else
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Without flow control v2 there are no netif queues to disable, so
 * this is a no-op.
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
#endif

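/**
 * ol_tx_pdev_throttle_phase_timer() - throttle phase (duty cycle) timer
 * @context: pdev handle
 *
 * Advances the throttle duty cycle to its next phase: tx traffic is
 * stopped for the OFF phase and resumed for the ON phase, and the timer
 * is rearmed with that phase's duration unless the throttle level is 0.
 *
 * Return: none
 */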
void ol_tx_pdev_throttle_phase_timer(void *context)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
	int ms;
	enum throttle_level cur_level;
	enum throttle_phase cur_phase;

	/* update the phase */
	pdev->tx_throttle.current_throttle_phase++;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
		pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
		/* Traffic is stopped */
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "throttle phase --> OFF\n");
		ol_txrx_thermal_pause(pdev);
		cur_level = pdev->tx_throttle.current_throttle_level;
		cur_phase = pdev->tx_throttle.current_throttle_phase;
		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
		if (pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "start timer %d ms\n", ms);
			qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
		}
	} else {
		/* Traffic can go */
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "throttle phase --> ON\n");
		ol_txrx_thermal_unpause(pdev);
		cur_level = pdev->tx_throttle.current_throttle_level;
		cur_phase = pdev->tx_throttle.current_throttle_phase;
		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
		if (pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "start timer %d ms\n", ms);
			qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
		}
	}
}

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
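/**
 * ol_tx_pdev_throttle_tx_timer() - throttle tx timer handler
 * @context: pdev handle
 *
 * Retries sending the frames held in each vdev's pause queue.
 *
 * Return: none
 */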
void ol_tx_pdev_throttle_tx_timer(void *context)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;

	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
#endif

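/**
 * ol_tx_throttle_set_level() - apply a new tx throttle level
 * @pdev: pdev handle
 * @level: throttle level to set, must be below THROTTLE_LEVEL_MAX
 *
 * Resets the duty cycle to the OFF phase and restarts the phase timer
 * with the new level's timing; no timer runs at THROTTLE_LEVEL_0.
 *
 * Return: none
 */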
void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
{
	int ms = 0;

	if (level >= THROTTLE_LEVEL_MAX) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s invalid throttle level set %d, ignoring\n",
			   __func__, level);
		return;
	}

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);

	/* Set the current throttle level */
	pdev->tx_throttle.current_throttle_level = (enum throttle_level)level;

	/* Reset the phase */
	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
	ol_txrx_thermal_unpause(pdev);

	/* Start with the new time */
	ms = pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_OFF];

	qdf_timer_stop(&pdev->tx_throttle.phase_timer);

	if (level != THROTTLE_LEVEL_0)
		qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
}

/*
 * This table stores the duty cycle for each level.
 * Example: the "on" time for level 2 with a 100 ms duty period is
 * "on" time = duty_period_ms >> throttle_duty_cycle_table[2]
 *           = 100 ms >> 2 = 25 ms
 */
static uint8_t g_throttle_duty_cycle_table[THROTTLE_LEVEL_MAX] = { 0, 1, 2, 4 };

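/**
 * ol_tx_throttle_init_period() - compute each level's on/off times
 * @pdev: pdev handle
 * @period: duty cycle period in ms
 *
 * For every throttle level, the ON time is the period right-shifted by
 * that level's duty cycle table entry, and the OFF time is the rest of
 * the period.
 *
 * Return: none
 */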
void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period)
{
	int i;

	/* Set the current throttle level */
	pdev->tx_throttle.throttle_period_ms = period;

	TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level OFF ON\n");
	for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
			pdev->tx_throttle.throttle_period_ms >>
			g_throttle_duty_cycle_table[i];
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
			pdev->tx_throttle.throttle_period_ms -
			pdev->tx_throttle.throttle_time_ms[
				i][THROTTLE_PHASE_ON];
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d %d %d\n", i,
			   pdev->tx_throttle.
			   throttle_time_ms[i][THROTTLE_PHASE_OFF],
			   pdev->tx_throttle.
			   throttle_time_ms[i][THROTTLE_PHASE_ON]);
	}
}

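/**
 * ol_tx_throttle_init() - initialize the pdev's tx throttle context
 * @pdev: pdev handle
 *
 * Sets the initial level and phase, creates the throttle mutex and
 * timers, and computes the per-level duty cycle times from the
 * configured throttle period.
 *
 * Return: none
 */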
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
{
	uint32_t throttle_period;

	pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
	qdf_spinlock_create(&pdev->tx_throttle.mutex);

	throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);

	ol_tx_throttle_init_period(pdev, throttle_period);

	qdf_timer_init(pdev->osdev,
		       &pdev->tx_throttle.phase_timer,
		       ol_tx_pdev_throttle_phase_timer, pdev,
		       QDF_TIMER_TYPE_SW);

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
	qdf_timer_init(pdev->osdev,
		       &pdev->tx_throttle.tx_timer,
		       ol_tx_pdev_throttle_tx_timer, pdev,
		       QDF_TIMER_TYPE_SW);
#endif

	pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
}
#endif /* QCA_SUPPORT_TX_THROTTLE */
/*--- End of LL tx throttle queue code --------------------------------------*/