/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h>          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ieee80211.h>          /* ieee80211_frame, etc. */
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev)					\
	do {								\
		if (qdf_atomic_read(&pdev->target_tx_credit) <		\
		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) {		\
			ol_osif_ath_tasklet(pdev->osdev);		\
		}							\
	} while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(TX_CREDIT_RECLAIM_SUPPORT)
/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu)			\
	qdf_atomic_add(							\
		factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
	OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
	OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
	qdf_atomic_add(delta, &pdev->target_tx_credit)
#else
/*
 * LL does not need to keep track of target credit.
 * Since the host tx descriptor pool size matches the target's,
 * we know the target has space for the new tx frame if the host's
 * tx descriptor allocation succeeded.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu)  /* no-op */
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu)    /* no-op */
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu)    /* no-op */
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta)       /* no-op */
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta)       /* no-op */
#endif
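/*
 * Credit accounting, roughly, when the macros above are not no-ops:
 * ol_tx_send_base() decrements target_tx_credit by the credit consumed by
 * the MSDU before the frame is handed to the download path, the failure
 * paths in ol_tx_send*() and ol_tx_download_done_base() add that credit
 * back immediately, and ol_tx_completion_handler() / ol_tx_inspect_handler()
 * return the credit once the target has reported the frames as done.
 */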

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)				\
	do {								\
		struct ol_txrx_vdev_t *vdev;				\
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {	\
			if (qdf_atomic_read(&vdev->os_q_paused) &&	\
			    (vdev->tx_fl_hwm != 0)) {			\
				qdf_spin_lock(&pdev->tx_mutex);		\
				if (pdev->tx_desc.num_free >		\
				    vdev->tx_fl_hwm) {			\
					qdf_atomic_set(&vdev->os_q_paused, 0); \
					qdf_spin_unlock(&pdev->tx_mutex); \
					ol_txrx_flow_control_cb(vdev, true); \
				} else {				\
					qdf_spin_unlock(&pdev->tx_mutex); \
				}					\
			}						\
		}							\
	} while (0)
#else
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
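/*
 * Note on OL_TX_FLOW_CT_UNPAUSE_OS_Q() above: it walks the vdev list and
 * unpauses any OS queue whose free tx descriptor count has risen back above
 * the vdev's high water mark.  tx_mutex is released before
 * ol_txrx_flow_control_cb() is invoked, so the OS-facing flow control
 * callback is never called with the pdev tx lock held.
 */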

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;

	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
			      qdf_nbuf_len(msdu));

	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
	OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
	OL_TX_CREDIT_RECLAIM(pdev);

	/*
	 * When the tx frame is downloaded to the target, there are two
	 * outstanding references:
	 * 1. The host download SW (HTT, HTC, HIF)
	 *    This reference is cleared by the ol_tx_send_done callback
	 *    functions.
	 * 2. The target FW
	 *    This reference is cleared by the ol_tx_completion_handler
	 *    function.
	 * It is extremely probable that the download completion is processed
	 * before the tx completion message.  However, under exceptional
	 * conditions the tx completion may be processed first.  Thus, rather
	 * than assuming that reference (1) is done before reference (2),
	 * explicit reference tracking is needed.
	 * Double-increment the ref count to account for both references
	 * described above.
	 */

	OL_TX_DESC_REF_INIT(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);

	return msdu_credit_consumed;
}

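/*
 * ol_tx_send() - hand a standard tx frame to HTT for download to the target.
 * On success the descriptor is released later by the download-done and
 * tx-completion paths; if htt_tx_send_std() rejects the frame, the credit
 * taken in ol_tx_send_base() is returned and the frame is freed here.
 */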
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(msdu)),
			     sizeof(qdf_nbuf_data(msdu))));
	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
	if (qdf_unlikely(failed)) {
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

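/*
 * ol_tx_send_batch() - hand a linked list of num_msdus tx frames to HTT in
 * a single call.  Any frames HTT rejects are returned as a list; for each
 * of those the target credit is restored and the descriptor and netbuf are
 * freed with an error status.
 */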
void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
		 qdf_nbuf_t head_msdu, int num_msdus)
{
	qdf_nbuf_t rejected;
	OL_TX_CREDIT_RECLAIM(pdev);

	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
	while (qdf_unlikely(rejected)) {
		struct ol_tx_desc_t *tx_desc;
		uint16_t *msdu_id_storage;
		qdf_nbuf_t next;

		next = qdf_nbuf_next(rejected);
		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

		OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

		rejected = next;
	}
}

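/*
 * ol_tx_send_nonstd() - same as ol_tx_send(), but for non-standard frames
 * (e.g. management or raw frames) whose HTT packet type must be supplied
 * explicitly rather than derived from the pdev configuration.
 */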
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
	if (failed) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Error: freeing tx frame after htt_tx failed");
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

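/*
 * ol_tx_download_done_base() - common handling when the host download of a
 * tx frame has finished.  For management frames, any registered download
 * callback is invoked.  On download failure the target credit is restored
 * and the frame is freed; on success the frame is freed only if the target
 * completion has already been processed, i.e. when this drops the last of
 * the two references taken in ol_tx_send_base().
 */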
static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	/*
	 * If the download is done for the Management frame then
	 * call the download callback if registered
	 */
	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
		int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		ol_txrx_mgmt_tx_cb download_cb =
			pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

		if (download_cb) {
			download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
				    tx_desc->netbuf, status != A_OK);
		}
	}

	if (status != A_OK) {
		OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     1 /* download err */);
	} else {
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/*
			 * The decremented value was zero - free the frame.
			 * Use the tx status recorded previously during
			 * tx completion handling.
			 */
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
						     tx_desc->status !=
						     htt_tx_status_ok);
		}
	}
}

void
ol_tx_download_done_ll(void *pdev,
		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
				 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
			      A_STATUS status,
			      qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

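/*
 * ol_tx_download_done_hl_free() - HL download-done variant that, unlike
 * ol_tx_download_done_hl_retain(), also releases regular data frames (and
 * returns their tx queue resource count) once the download has finished.
 */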
void
ol_tx_download_done_hl_free(void *txrx_pdev,
			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);

	if ((tx_desc->pkt_type != ol_tx_frm_no_free) &&
	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
	}
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      credit_delta,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      credit_delta);
	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus);
#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
#else
#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus) /* no-op */
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline"
 * functions, since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inlined functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status)	\
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,		\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		qdf_atomic_init(&(_tx_desc)->ref_cnt);			\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!lcl_freelist)) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else    /*!ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,		\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!lcl_freelist)) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /*!ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	ol_tx_msdu_complete_single((_pdev), (_tx_desc),			\
				   (_netbuf), (_lcl_freelist),		\
				   _tx_desc_last)
#else                           /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_single((_pdev), (_tx_desc),	\
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last));	\
		} else {						\
			ol_tx_desc_frame_free_nonstd(			\
				(_pdev), (_tx_desc),			\
				(_status) != htt_tx_status_ok);		\
		}							\
	} while (0)
#endif                          /* !QCA_TX_STD_PATH_ONLY */
#else                           /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	ol_tx_msdu_complete_batch((_pdev), (_tx_desc), (_tx_descs), (_status))
#else                           /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc),	\
						  (_tx_descs), (_status)); \
		} else {						\
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) !=	\
						     htt_tx_status_ok);	\
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */

void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
	int i = 0;
	struct ol_tx_desc_t *tx_desc;

	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
		tx_desc = ol_tx_desc_find(pdev, i);
		/*
		 * Confirm that each tx descriptor is "empty", i.e. it has
		 * no tx frame attached.
		 * In particular, check that there are no frames that have
		 * been given to the target to transmit, for which the
		 * target has never provided a response.
		 */
		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "Warning: freeing tx frame (no tx completion from the target)\n");
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
		}
	}
}

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
	ol_tx_target_credit_update(pdev, credits);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
			 int num_msdus,
			 enum htt_tx_status status, void *tx_desc_id_iterator)
{
	int i;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	char *trace_str;

	uint32_t byte_cnt = 0;
	qdf_nbuf_t netbuf;

	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);

	trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		tx_desc->status = status;
		netbuf = tx_desc->netbuf;

		qdf_runtime_pm_put();
		qdf_nbuf_trace_update(netbuf, trace_str);
		/* Per SDU update of byte count */
		byte_cnt += qdf_nbuf_len(netbuf);
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			ol_tx_statistics(
				pdev->ctrl_pdev,
				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
							  (tx_desc->
							   htt_tx_desc))),
				status != htt_tx_status_ok);
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last, status);
		}
		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
	}

	/* One shot protected access to pdev freelist, when setup */
	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		pdev->tx_desc.num_free += (uint16_t) num_msdus;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   status != htt_tx_status_ok);
	}

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	qdf_nbuf_t netbuf;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);


	qdf_atomic_add(1, &pdev->target_tx_credit);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
		      int num_msdus, void *tx_desc_id_iterator)
{
	uint16_t vdev_id, i;
	struct ol_txrx_vdev_t *vdev;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	qdf_nbuf_t netbuf;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		netbuf = tx_desc->netbuf;

		/* find the "vdev" this tx_desc belongs to */
		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
						    (tx_desc->htt_tx_desc)));
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->vdev_id == vdev_id)
				break;
		}

		/* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
		/* save this multicast packet to local free list */
		if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
		{
			/*
			 * For this function only, force the htt status to
			 * "htt_tx_status_ok" for graceful freeing of this
			 * multicast frame.
			 */
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last,
					    htt_tx_status_ok);
		}
	}

	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   htt_tx_status_discard);
	}
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      num_msdus,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      num_msdus);

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
}

#ifdef QCA_COMPUTE_TX_DELAY

void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
{
	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
}

void
ol_tx_packet_count(ol_txrx_pdev_handle pdev,
		   uint16_t *out_packet_count,
		   uint16_t *out_packet_loss_count, int category)
{
	*out_packet_count = pdev->packet_count[category];
	*out_packet_loss_count = pdev->packet_loss_count[category];
	pdev->packet_count[category] = 0;
	pdev->packet_loss_count[category] = 0;
}

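/*
 * ol_tx_delay_avg() - compute the rounded average (sum / num) without a
 * 64-bit division: the sum is shifted right until it fits in 32 bits, and
 * the denominator is shifted by the same amount.
 * Example (illustrative): sum = 2^33 ticks, num = 8 -> after two shifts
 * sum32 = 2^31 and num = 2, giving an average of 2^30, the exact result.
 * The shifting introduces at most a small rounding error, which is
 * negligible relative to the tick granularity.
 */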
uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	int shift = 0;
	/*
	 * To avoid doing a 64-bit divide, shift the sum down until it is
	 * no more than 32 bits (and shift the denominator to match).
	 */
	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t) sum;
	num >>= shift;
	return (sum32 + (num >> 1)) / num;      /* round to nearest */
}

void
ol_tx_delay(ol_txrx_pdev_handle pdev,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		*tx_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
		 uint16_t *report_bin_values, int category)
{
	int index, i, j;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;
		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
					    qdf_nbuf_t tx_nbuf)
{
	uint8_t *hdr_ptr;
	void *datap = qdf_nbuf_data(tx_nbuf);

	if (pdev->frame_format == wlan_frm_fmt_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
		hdr_ptr = wh->i_addr1;
	} else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
		hdr_ptr = wh->i_addr1;
	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		hdr_ptr = datap;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid standard frame type: %d",
			  pdev->frame_format);
		qdf_assert(0);
		hdr_ptr = NULL;
	}
	return hdr_ptr;
}

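/*
 * ol_tx_delay_tid_from_l3_hdr() - derive a TID for delay accounting when the
 * nbuf does not already carry one: management and multicast frames map to
 * dedicated pseudo-TIDs, while unicast data frames use the upper three bits
 * of the IPv4 TOS field or the IPv6 traffic class.  Non-IP frames return
 * QDF_NBUF_TX_EXT_TID_INVALID.
 */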
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (NULL == dest_addr)
		return QDF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;
		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			struct llc_snap_hdr_t *llc_hdr;
			llc_hdr = (struct llc_snap_hdr_t *)
				  (qdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) | llc_hdr->
				ethertype[1];
		}
	} else {
		struct llc_snap_hdr_t *llc_hdr;
		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return QDF_NBUF_TX_EXT_TID_INVALID;
	}
}
#endif

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;

	qdf_nbuf_t msdu = tx_desc->netbuf;
	tid = qdf_nbuf_get_tid(msdu);
	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
			/*
			 * TID could not be determined
			 * (this is not an IP frame?)
			 */
			return -EINVAL;
		}
	}
	return tid;
#else
	return 0;
#endif
}

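/*
 * ol_tx_delay_hist_bin() - map a queuing delay (in ticks) to a histogram
 * bin index using a multiply-and-shift in place of a divide:
 *   bin ~= delay_ticks / bin_width
 *        = (delay_ticks * hist_internal_bin_width_mult) >>
 *          hist_internal_bin_width_shift
 * For example, a bin width of 4 ticks could be encoded as mult = 1 and
 * shift = 2 (values here are illustrative; the actual mult/shift pair is
 * set up when the pdev tx_delay state is initialized).  Delays beyond the
 * last bin are clamped into the final (overflow) bin.
 */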
static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
	int bin;
	/*
	 * For speed, multiply and shift to approximate a divide. This causes
	 * a small error, but the approximation error should be much less
	 * than the other uncertainties in the tx delay computation.
	 */
	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
	      pdev->tx_delay.hist_internal_bin_width_shift;
	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

	return bin;
}

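/*
 * ol_tx_delay_compute() - update the per-category tx delay statistics for a
 * batch of completed MSDUs: count the packets (and, on failure, the losses),
 * accumulate the transmit delay since the previous completion and the
 * per-descriptor queuing delay, update the queuing-delay histogram, and
 * switch to the other ping-pong copy once the averaging period has elapsed.
 */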
static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
	int i, index, cat;
	uint32_t now_ticks = qdf_system_ticks();
	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
	uint32_t avg_time_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(num_msdus > 0);

	/*
	 * Keep running counters of total and lost packets;
	 * they are reset in ol_tx_delay(), the function used to
	 * fetch the stats.
	 */

	cat = ol_tx_delay_category(pdev, desc_ids[0]);
	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
		return;

	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
	if (status != htt_tx_status_ok) {
		for (i = 0; i < num_msdus; i++) {
			cat = ol_tx_delay_category(pdev, desc_ids[i]);
			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
				return;
			pdev->packet_loss_count[cat]++;
		}
		return;
	}

	/* since we may switch the ping-pong index, provide mutex w. readers */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = pdev->tx_delay.cats[cat].in_progress_idx;

	data = &pdev->tx_delay.cats[cat].copies[index];

	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
		tx_delay_transmit_ticks =
			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
		/*
		 * We'd like to account for the number of MSDUs that were
		 * transmitted together, but we don't know this.  All we know
		 * is the number of MSDUs that were acked together.
		 * Since the frame error rate is small, this is nearly the same
		 * as the number of frames transmitted together.
		 */
		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
		data->avgs.transmit_num += num_msdus;
	}
	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

	for (i = 0; i < num_msdus; i++) {
		int bin;
		uint16_t id = desc_ids[i];
		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

		tx_delay_queue_ticks =
			now_ticks - tx_desc->entry_timestamp_ticks;

		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
		data->avgs.queue_num++;
		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
		data->hist_bins_queue[bin]++;
	}

	/* check if it's time to start a new average */
	avg_time_ticks =
		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
		index = 1 - index;
		pdev->tx_delay.cats[cat].in_progress_idx = index;
		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#endif /* QCA_COMPUTE_TX_DELAY */