/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h>          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <linux/ieee80211.h>    /* ieee80211_frame, etc. */
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev) \
        do { \
                if (qdf_atomic_read(&pdev->target_tx_credit) < \
                    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) { \
                        ol_osif_ath_tasklet(pdev->osdev); \
                } \
        } while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)

/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
        qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
        qdf_atomic_add(delta, &pdev->target_tx_credit);
}
#else

static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
#endif
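
/*
 * Usage sketch (orientation only, derived from the code below in this
 * file): ol_tx_send_base() consumes credit via
 * ol_tx_target_credit_decr_int() when a frame is handed to the download
 * path; the failure paths in ol_tx_send()/ol_tx_send_nonstd() and
 * ol_tx_download_done_base() return it via
 * ol_tx_target_credit_incr_int()/ol_tx_target_credit_incr(), and
 * ol_tx_completion_handler() replenishes credit when the target reports
 * frame completions.
 */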

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
void ol_txrx_flow_control_cb(struct cdp_vdev *pvdev, bool tx_resume)
{
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        qdf_spin_lock_bh(&vdev->flow_control_lock);
        if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
                vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
        qdf_spin_unlock_bh(&vdev->flow_control_lock);

        return;
}

/**
 * ol_txrx_flow_control_is_pause() - is osif paused by flow control
 * @vdev: vdev handle
 *
 * Return: true if osif is paused by flow control
 */
static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
{
        bool is_pause = false;
        if ((vdev->osif_flow_control_is_pause) && (vdev->osif_fc_ctx))
                is_pause = vdev->osif_flow_control_is_pause(vdev->osif_fc_ctx);

        return is_pause;
}

/**
 * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
 * @pdev: physical device object
 *
 * Return: None
 */
static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
        struct ol_txrx_vdev_t *vdev;

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if ((qdf_atomic_read(&vdev->os_q_paused) &&
                     (vdev->tx_fl_hwm != 0)) ||
                    ol_txrx_flow_control_is_pause(vdev)) {
                        qdf_spin_lock(&pdev->tx_mutex);
                        if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
                                qdf_atomic_set(&vdev->os_q_paused, 0);
                                qdf_spin_unlock(&pdev->tx_mutex);
                                ol_txrx_flow_control_cb((struct cdp_vdev *)vdev,
                                                        true);
                        } else {
                                qdf_spin_unlock(&pdev->tx_mutex);
                        }
                }
        }
}
#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
        struct ol_txrx_vdev_t *vdev;

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if (qdf_atomic_read(&vdev->os_q_paused) &&
                    (vdev->tx_fl_hwm != 0)) {
                        qdf_spin_lock(&pdev->tx_mutex);
                        if (((ol_tx_desc_pool_size_hl(
                                        vdev->pdev->ctrl_pdev) >> 1)
                             - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)
                            - qdf_atomic_read(&vdev->tx_desc_count)
                            > vdev->tx_fl_hwm) {
                                qdf_atomic_set(&vdev->os_q_paused, 0);
                                qdf_spin_unlock(&pdev->tx_mutex);
                                vdev->osif_flow_control_cb(vdev, true);
                        } else {
                                qdf_spin_unlock(&pdev->tx_mutex);
                        }
                }
        }
}
#else

static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
                struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
        int msdu_credit_consumed;

        TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
        TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              qdf_atomic_read(&pdev->target_tx_credit) - 1,
                              qdf_nbuf_len(msdu));

        msdu_credit_consumed = htt_tx_msdu_credit(msdu);
        ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
        OL_TX_CREDIT_RECLAIM(pdev);

        /*
         * When the tx frame is downloaded to the target, there are two
         * outstanding references:
         * 1. The host download SW (HTT, HTC, HIF)
         *    This reference is cleared by the ol_tx_send_done callback
         *    functions.
         * 2. The target FW
         *    This reference is cleared by the ol_tx_completion_handler
         *    function.
         * It is extremely probable that the download completion is processed
         * before the tx completion message. However, under exceptional
         * conditions the tx completion may be processed first. Thus, rather
         * than assuming that reference (1) is done before reference (2),
         * explicit reference tracking is needed.
         * Double-increment the ref count to account for both references
         * described above.
         */

        OL_TX_DESC_REF_INIT(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);

        return msdu_credit_consumed;
}
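
/*
 * Reference-count lifecycle sketch (illustrative only; it restates the
 * comment in ol_tx_send_base() above for the normal, error-free case):
 *
 *   ol_tx_send_base():           refs = 2 (host download SW + target FW)
 *   ol_tx_download_done_base():  drops the download reference
 *   ol_tx_completion_handler():  drops the target FW reference
 *
 * The two drops can happen in either order; whichever path observes
 * OL_TX_DESC_NO_REFS() become true frees the descriptor and its netbuf.
 */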

void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
           struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
        DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
                                 QDF_TRACE_DEFAULT_PDEV_ID,
                                 qdf_nbuf_data_addr(msdu),
                                 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
                                 vdev_id));
        failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
        if (qdf_unlikely(failed)) {
                ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
                 qdf_nbuf_t head_msdu, int num_msdus)
{
        qdf_nbuf_t rejected;

        OL_TX_CREDIT_RECLAIM(pdev);

        rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
        while (qdf_unlikely(rejected)) {
                struct ol_tx_desc_t *tx_desc;
                uint16_t *msdu_id_storage;
                qdf_nbuf_t next;

                next = qdf_nbuf_next(rejected);
                msdu_id_storage = ol_tx_msdu_id_storage(rejected);
                tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

                ol_tx_target_credit_incr(pdev, rejected);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

                rejected = next;
        }
}

void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
                  struct ol_tx_desc_t *tx_desc,
                  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
        failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
        if (failed) {
                ol_txrx_err(
                        "Error: freeing tx frame after htt_tx failed");
                ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
                         A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        qdf_assert(tx_desc);

        /*
         * If the download is done for the Management frame,
         * call the download callback if registered
         */
        if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
                int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
                ol_txrx_mgmt_tx_cb download_cb =
                        pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

                if (download_cb) {
                        download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
                                    tx_desc->netbuf, status != A_OK);
                }
        }

        if (status != A_OK) {
                ol_tx_target_credit_incr(pdev, msdu);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             1 /* download err */);
        } else {
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        /*
                         * The decremented value was zero - free the frame.
                         * Use the tx status recorded previously during
                         * tx completion handling.
                         */
                        ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                                     tx_desc->status !=
                                                     htt_tx_status_ok);
                }
        }
}

void
ol_tx_download_done_ll(void *pdev,
                       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
                                 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
                              A_STATUS status,
                              qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
                            A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        qdf_assert(tx_desc);

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);

        if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
            (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
                qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
        }
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              credit_delta,
                              qdf_atomic_read(&pdev->target_tx_credit) +
                              credit_delta);
        qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus);

#else
static inline void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus)
{
}
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline" functions,
 * since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inlined functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
        TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                qdf_atomic_init(&(_tx_desc)->ref_cnt); \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
                qdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (qdf_unlikely(!lcl_freelist)) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)
#else /* !ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
                qdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (qdf_unlikely(!lcl_freelist)) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)

#endif /* !ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status, is_tx_desc_freed) \
        { \
                is_tx_desc_freed = 0; \
                ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
                                           (_netbuf), (_lcl_freelist), \
                                           _tx_desc_last) \
        }
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status, is_tx_desc_freed) \
        do { \
                if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
                        is_tx_desc_freed = 0; \
                        ol_tx_msdu_complete_single((_pdev), (_tx_desc),\
                                                   (_netbuf), (_lcl_freelist), \
                                                   (_tx_desc_last)); \
                } else { \
                        is_tx_desc_freed = 1; \
                        ol_tx_desc_frame_free_nonstd( \
                                (_pdev), (_tx_desc), \
                                (_status) != htt_tx_status_ok); \
                } \
        } while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status, is_tx_desc_freed) \
        { \
                is_tx_desc_freed = 0; \
                ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
                                          (_tx_descs), (_status)) \
        }
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status, is_tx_desc_freed) \
        do { \
                if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
                        is_tx_desc_freed = 0; \
                        ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
                                                  (_tx_descs), (_status)); \
                } else { \
                        is_tx_desc_freed = 1; \
                        ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
                                                     (_status) != \
                                                     htt_tx_status_ok); \
                } \
        } while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */

#if !defined(CONFIG_HL_SUPPORT)
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
        int i = 0;
        struct ol_tx_desc_t *tx_desc;
        int num_discarded = 0;

        for (i = 0; i < pdev->tx_desc.pool_size; i++) {
                tx_desc = ol_tx_desc_find(pdev, i);
                /*
                 * Confirm that each tx descriptor is "empty", i.e. it has
                 * no tx frame attached.
                 * In particular, check that there are no frames that have
                 * been given to the target to transmit, for which the
                 * target has never provided a response.
                 */
                if (qdf_atomic_read(&tx_desc->ref_cnt)) {
                        ol_txrx_dbg(
                                "Warning: freeing tx desc %d", tx_desc->id);
                        ol_tx_desc_frame_free_nonstd(pdev,
                                                     tx_desc, 1);
                        num_discarded++;
                }
        }

        if (num_discarded)
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                          "Warning: freed %d tx descs for which no tx completion rcvd from the target",
                          num_discarded);
}
#endif

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
        ol_tx_target_credit_update(pdev, credits);

        if (pdev->cfg.is_high_latency)
                ol_tx_sched(pdev);

        /* UNPAUSE OS Q */
        ol_tx_flow_ct_unpause_os_q(pdev);
}

#ifdef WLAN_FEATURE_TSF_PLUS
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
                u_int32_t *msg_word, int num_msdus)
{
        u_int32_t has_tx_tsf;
        u_int32_t has_retry;
        struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
        struct htt_tx_compl_ind_append_retries *retry_list = NULL;
        int offset_dwords;

        has_tx_tsf = HTT_TX_COMPL_IND_APPEND1_GET(*msg_word);
        if (num_msdus <= 0 || !has_tx_tsf)
                return NULL;

        offset_dwords = 1 + ((num_msdus + 1) >> 1);

        has_retry = HTT_TX_COMPL_IND_APPEND_GET(*msg_word);
        if (has_retry) {
                int retry_index = 0;
                int width_for_each_retry =
                        (sizeof(struct htt_tx_compl_ind_append_retries) +
                         3) >> 2;

                retry_list = (struct htt_tx_compl_ind_append_retries *)
                        (msg_word + offset_dwords);
                while (retry_list) {
                        if (retry_list[retry_index++].flag == 0)
                                break;
                }
                offset_dwords += retry_index * width_for_each_retry;
        }
        txtstamp_list = (struct htt_tx_compl_ind_append_tx_tstamp *)
                (msg_word + offset_dwords);

        return txtstamp_list;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
                                   qdf_nbuf_t netbuf, u_int64_t ts)
{
        if (!netbuf)
                return;

        if (pdev->ol_tx_timestamp_cb)
                pdev->ol_tx_timestamp_cb(netbuf, ts);
}
#else
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
                u_int32_t *msg_word, int num_msdus)
{
        return NULL;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
                                   qdf_nbuf_t netbuf, u_int64_t ts)
{
}
#endif
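
/*
 * Worked example of the offset arithmetic in ol_tx_get_txtstamps()
 * (illustrative only): the completion indication carries one 16-bit
 * MSDU id per MSDU after the 32-bit header word, so for num_msdus = 3
 *
 *     offset_dwords = 1 + ((3 + 1) >> 1) = 3
 *
 * i.e. the append area starts three dwords into the message.  When
 * retry records are present (HTT_TX_COMPL_IND_APPEND_GET()), their
 * total length is added to offset_dwords before the timestamp list is
 * read.
 */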

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
                         int num_msdus,
                         enum htt_tx_status status, void *msg)
{
        int i;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        uint32_t byte_cnt = 0;
        qdf_nbuf_t netbuf;
        tp_ol_packetdump_cb packetdump_cb;
        uint32_t is_tx_desc_freed = 0;
        struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
        u_int32_t *msg_word = (u_int32_t *)msg;
        u_int16_t *desc_ids = (u_int16_t *)(msg_word + 1);

        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        ol_tx_desc_list tx_descs;
        TAILQ_INIT(&tx_descs);

        ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
        if (status == htt_tx_status_ok)
                txtstamp_list = ol_tx_get_txtstamps(msg_word, num_msdus);

        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                tx_desc->status = status;
                netbuf = tx_desc->netbuf;

                if (txtstamp_list)
                        ol_tx_timestamp(pdev, netbuf,
                                        (u_int64_t)txtstamp_list->timestamp[i]
                                        );

                QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);

                if (tx_desc->pkt_type != OL_TX_FRM_TSO) {
                        packetdump_cb = pdev->ol_tx_packetdump_cb;
                        if (packetdump_cb)
                                packetdump_cb(netbuf, status,
                                              tx_desc->vdev->vdev_id,
                                              TX_DATA_PKT);
                }

                DPTRACE(qdf_dp_trace_ptr(netbuf,
                        QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
                        QDF_TRACE_DEFAULT_PDEV_ID,
                        qdf_nbuf_data_addr(netbuf),
                        sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status));
                htc_pm_runtime_put(pdev->htt_pdev->htc_pdev);
                ol_tx_desc_update_group_credit(pdev, tx_desc_id, 1, 0, status);
                /* Per SDU update of byte count */
                byte_cnt += qdf_nbuf_len(netbuf);
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        ol_tx_statistics(
                                pdev->ctrl_pdev,
                                HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                                          (tx_desc->
                                                           htt_tx_desc))),
                                status != htt_tx_status_ok);
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last, status,
                                            is_tx_desc_freed);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                        if (!is_tx_desc_freed) {
                                tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
                                tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
                        }
#endif
                }
        }

        /* One shot protected access to pdev freelist, when setup */
        if (lcl_freelist) {
                qdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                pdev->tx_desc.num_free += (uint16_t) num_msdus;
                qdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           status != htt_tx_status_ok);
        }

        if (pdev->cfg.is_high_latency) {
                /*
                 * Credit was already explicitly updated by HTT,
                 * but update the number of available tx descriptors,
                 * then invoke the scheduler, since new credit is probably
                 * available now.
                 */
                qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
                ol_tx_sched(pdev);
        } else {
                ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
        }

        /* UNPAUSE OS Q */
        ol_tx_flow_ct_unpause_os_q(pdev);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
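
/*
 * Note (descriptive only): the completion loop above batches freed
 * descriptors on lcl_freelist so that pdev->tx_mutex is taken once per
 * completion message rather than once per MSDU; when that local
 * freelist is not in use, the descriptors queued on tx_descs are
 * released through ol_tx_desc_frame_list_free() instead.
 */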

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
                u_int16_t tx_desc_id, int credit, u_int8_t absolute,
                enum htt_tx_status status)
{
        uint8_t i, is_member;
        uint16_t vdev_id_mask;
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
        for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
                vdev_id_mask =
                        OL_TXQ_GROUP_VDEV_ID_MASK_GET(
                                        pdev->txq_grps[i].membership);
                is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
                                        tx_desc->vdev_id);
                if (is_member) {
                        ol_txrx_update_group_credit(&pdev->txq_grps[i],
                                                    credit, absolute);
                        break;
                }
        }
        ol_tx_update_group_credit_stats(pdev);
}

#ifdef DEBUG_HL_LOGGING

void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        uint16_t curr_index;
        uint8_t i;

        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        pdev->grp_stats.last_valid_index++;
        if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
                                                - 1)) {
                pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
                pdev->grp_stats.wrap_around = 1;
        }
        curr_index = pdev->grp_stats.last_valid_index;

        for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
                pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
                        OL_TXQ_GROUP_VDEV_ID_MASK_GET(
                                        pdev->txq_grps[i].membership);
                pdev->grp_stats.stats[curr_index].grp[i].credit =
                        qdf_atomic_read(&pdev->txq_grps[i].credit);
        }

        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}

void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        uint16_t i, j, is_break = 0;
        int16_t curr_index, old_index, wrap_around;
        uint16_t curr_credit, old_credit, mem_vdevs;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "Group credit stats:");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "  No: GrpID: Credit: Change: vdev_map");

        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        curr_index = pdev->grp_stats.last_valid_index;
        wrap_around = pdev->grp_stats.wrap_around;
        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

        if (curr_index < 0) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Not initialized");
                return;
        }

        for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
                old_index = curr_index - 1;
                if (old_index < 0) {
                        if (wrap_around == 0)
                                is_break = 1;
                        else
                                old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
                }

                for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
                        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
                        curr_credit =
                                pdev->grp_stats.stats[curr_index].
                                grp[j].credit;
                        if (!is_break)
                                old_credit =
                                        pdev->grp_stats.stats[old_index].
                                        grp[j].credit;

                        mem_vdevs =
                                pdev->grp_stats.stats[curr_index].grp[j].
                                member_vdevs;
                        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

                        if (!is_break)
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%4d: %5d: %6d %6d %8x",
                                          curr_index, j,
                                          curr_credit,
                                          (curr_credit - old_credit),
                                          mem_vdevs);
                        else
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%4d: %5d: %6d %6s %8x",
                                          curr_index, j,
                                          curr_credit, "NA", mem_vdevs);
                }

                if (is_break)
                        break;

                curr_index = old_index;
        }
}

void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
        pdev->grp_stats.last_valid_index = -1;
        pdev->grp_stats.wrap_around = 0;
        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
#endif
#endif

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
                                enum htt_tx_status status, uint16_t tx_desc_id)
{
        struct ol_tx_desc_t *tx_desc;
        qdf_nbuf_t netbuf;
        tp_ol_packetdump_cb packetdump_cb;

        tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
        if (tx_desc == NULL) {
                ol_txrx_err(
                        "%s: invalid desc_id(%u), ignore it.\n",
                        __func__,
                        tx_desc_id);
                return;
        }

        tx_desc->status = status;
        netbuf = tx_desc->netbuf;

        QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

        packetdump_cb = pdev->ol_tx_packetdump_cb;
        if (packetdump_cb)
                packetdump_cb(netbuf, status,
                              tx_desc->vdev->vdev_id, TX_MGMT_PKT);

        if (OL_TX_DESC_NO_REFS(tx_desc)) {
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             status != htt_tx_status_ok);
        }

        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              1, qdf_atomic_read(&pdev->target_tx_credit) + 1);

        if (pdev->cfg.is_high_latency) {
                /*
                 * Credit was already explicitly updated by HTT,
                 * but update the number of available tx descriptors,
                 * then invoke the scheduler, since new credit is probably
                 * available now.
                 */
                qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
                ol_tx_sched(pdev);
        } else {
                qdf_atomic_add(1, &pdev->target_tx_credit);
        }
}

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
                      int num_msdus, void *tx_desc_id_iterator)
{
        uint16_t vdev_id, i;
        struct ol_txrx_vdev_t *vdev;
        uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        qdf_nbuf_t netbuf;
        ol_tx_desc_list tx_descs;
        uint32_t is_tx_desc_freed = 0;

        TAILQ_INIT(&tx_descs);

        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                netbuf = tx_desc->netbuf;

                /* find the "vdev" this tx_desc belongs to */
                vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                                    (tx_desc->htt_tx_desc)));
                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                        if (vdev->vdev_id == vdev_id)
                                break;
                }

                /* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
                /* save this multicast packet to local free list */
                if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
                {
                        /*
                         * For this function only, force htt status to be
                         * "htt_tx_status_ok"
                         * for graceful freeing of this multicast frame
                         */
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last,
                                            htt_tx_status_ok,
                                            is_tx_desc_freed);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                        if (!is_tx_desc_freed) {
                                tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
                                tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
                        }
#endif
                }
        }

        if (lcl_freelist) {
                qdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                qdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           htt_tx_status_discard);
        }
        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              num_msdus,
                              qdf_atomic_read(&pdev->target_tx_credit) +
                              num_msdus);

        if (pdev->cfg.is_high_latency) {
                /* credit was already explicitly updated by HTT */
                ol_tx_sched(pdev);
        } else {
                ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
        }
}

| 1026 | #ifdef QCA_COMPUTE_TX_DELAY |
Manjunathappa Prakash | 3454fd6 | 2016-04-01 08:52:06 -0700 | [diff] [blame] | 1027 | /**
| 1028 |  * @brief Update the interval over which TSM tx delay stats are computed.
| 1029 |  * @param ppdev - the data physical device
| 1030 |  * @param interval - stats computation interval, in milliseconds
| 1031 |  */
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1032 | void ol_tx_set_compute_interval(struct cdp_pdev *ppdev, uint32_t interval) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1033 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1034 | struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev; |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 1035 | pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1036 | } |
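
/*
 * For example (illustrative), ol_tx_set_compute_interval(ppdev, 5000)
 * sets the tx delay averaging period to 5 seconds.
 */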
| 1037 | |
Manjunathappa Prakash | 3454fd6 | 2016-04-01 08:52:06 -0700 | [diff] [blame] | 1038 | /**
| 1039 |  * @brief Return the uplink (transmitted) packet count and loss count.
| 1040 |  * @details
| 1041 |  * This function is called at regular intervals to get the uplink packet
| 1042 |  * count and loss count for a given stream (access category).
| 1043 |  * It also resets the counters, so each call returns the packets counted
| 1044 |  * since the previous call (a 5 second interval by default). These counters
| 1045 |  * are incremented per access category in ol_tx_completion_handler().
| 1046 |  *
| 1047 |  * @param category - access category of interest
| 1048 |  * @param out_packet_count - number of packets transmitted
| 1049 |  * @param out_packet_loss_count - number of packets lost
| 1050 |  */
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1051 | void |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1052 | ol_tx_packet_count(struct cdp_pdev *ppdev, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1053 | uint16_t *out_packet_count, |
| 1054 | uint16_t *out_packet_loss_count, int category) |
| 1055 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1056 | struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1057 | *out_packet_count = pdev->packet_count[category]; |
| 1058 | *out_packet_loss_count = pdev->packet_loss_count[category]; |
| 1059 | pdev->packet_count[category] = 0; |
| 1060 | pdev->packet_loss_count[category] = 0; |
| 1061 | } |
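
/*
 * Usage sketch (hypothetical caller, not part of this file): a TSM stats
 * poller might read and implicitly reset the counters for one access
 * category per measurement interval, e.g.:
 *
 *	uint16_t tx_cnt, tx_loss_cnt;
 *
 *	ol_tx_packet_count(ppdev, &tx_cnt, &tx_loss_cnt, category);
 *	loss_pct = tx_cnt ? (100 * tx_loss_cnt) / tx_cnt : 0;
 *
 * Here tx_cnt, tx_loss_cnt and loss_pct are illustrative names only.
 */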
| 1062 | |
Jeff Johnson | f89f58f | 2016-10-14 09:58:29 -0700 | [diff] [blame] | 1063 | static uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1064 | { |
| 1065 | uint32_t sum32; |
| 1066 | int shift = 0; |
| 1067 | /* |
| 1068 | * To avoid doing a 64-bit divide, shift the sum down until it is |
| 1069 | * no more than 32 bits (and shift the denominator to match). |
| 1070 | */ |
| 1071 | while ((sum >> 32) != 0) { |
| 1072 | sum >>= 1; |
| 1073 | shift++; |
| 1074 | } |
| 1075 | sum32 = (uint32_t) sum; |
| 1076 | num >>= shift; |
| 1077 | return (sum32 + (num >> 1)) / num; /* round to nearest */ |
| 1078 | } |
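
/*
 * Worked example for the averaging above (illustrative values only):
 * for sum = 2^32 and num = 4, one shift gives sum32 = 0x80000000 and
 * num = 2, so the result is (0x80000000 + 1) / 2 = 0x40000000, which
 * matches the true average 2^32 / 4.  For sum = 10 and num = 4 (no
 * shift needed), (10 + 2) / 4 = 3, i.e. the exact average 2.5 rounded
 * to the nearest integer.
 */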
| 1079 | |
| 1080 | void |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1081 | ol_tx_delay(struct cdp_pdev *ppdev, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1082 | uint32_t *queue_delay_microsec, |
| 1083 | uint32_t *tx_delay_microsec, int category) |
| 1084 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1085 | struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1086 | int index; |
| 1087 | uint32_t avg_delay_ticks; |
| 1088 | struct ol_tx_delay_data *data; |
| 1089 | |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 1090 | qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1091 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1092 | qdf_spin_lock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1093 | index = 1 - pdev->tx_delay.cats[category].in_progress_idx; |
| 1094 | |
| 1095 | data = &pdev->tx_delay.cats[category].copies[index]; |
| 1096 | |
| 1097 | if (data->avgs.transmit_num > 0) { |
| 1098 | avg_delay_ticks = |
| 1099 | ol_tx_delay_avg(data->avgs.transmit_sum_ticks, |
| 1100 | data->avgs.transmit_num); |
| 1101 | *tx_delay_microsec = |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 1102 | qdf_system_ticks_to_msecs(avg_delay_ticks * 1000); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1103 | } else { |
| 1104 | /* |
| 1105 | * This case should only happen if there's a query |
| 1106 | * within 5 sec after the first tx data frame. |
| 1107 | */ |
| 1108 | *tx_delay_microsec = 0; |
| 1109 | } |
| 1110 | if (data->avgs.queue_num > 0) { |
| 1111 | avg_delay_ticks = |
| 1112 | ol_tx_delay_avg(data->avgs.queue_sum_ticks, |
| 1113 | data->avgs.queue_num); |
| 1114 | *queue_delay_microsec = |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 1115 | qdf_system_ticks_to_msecs(avg_delay_ticks * 1000); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1116 | } else { |
| 1117 | /* |
| 1118 | * This case should only happen if there's a query |
| 1119 | * within 5 sec after the first tx data frame. |
| 1120 | */ |
| 1121 | *queue_delay_microsec = 0; |
| 1122 | } |
| 1123 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1124 | qdf_spin_unlock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1125 | } |
| 1126 | |
| 1127 | void |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1128 | ol_tx_delay_hist(struct cdp_pdev *ppdev, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1129 | uint16_t *report_bin_values, int category) |
| 1130 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1131 | struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1132 | int index, i, j; |
| 1133 | struct ol_tx_delay_data *data; |
| 1134 | |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 1135 | qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1136 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1137 | qdf_spin_lock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1138 | index = 1 - pdev->tx_delay.cats[category].in_progress_idx; |
| 1139 | |
| 1140 | data = &pdev->tx_delay.cats[category].copies[index]; |
| 1141 | |
| 1142 | for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) { |
| 1143 | uint16_t internal_bin_sum = 0; |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1144 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1145 | while (j < (1 << i)) |
| 1146 | internal_bin_sum += data->hist_bins_queue[j++]; |
| 1147 | |
| 1148 | report_bin_values[i] = internal_bin_sum; |
| 1149 | } |
| 1150 | report_bin_values[i] = data->hist_bins_queue[j]; /* overflow */ |
| 1151 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1152 | qdf_spin_unlock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1153 | } |
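
/*
 * Illustrative note: as the loop above reads, the report bins aggregate
 * the internal histogram bins in power-of-two groups, e.g.
 * report[0] = internal[0], report[1] = internal[1],
 * report[2] = internal[2..3], report[3] = internal[4..7], and so on,
 * with the final report bin holding the overflow count.
 */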
| 1154 | |
| 1155 | #ifdef QCA_COMPUTE_TX_DELAY_PER_TID |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1156 | |
| 1157 | static uint8_t |
| 1158 | ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1159 | qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1160 | { |
| 1161 | uint16_t ethertype; |
| 1162 | uint8_t *dest_addr, *l3_hdr; |
| 1163 | int is_mgmt, is_mcast; |
| 1164 | int l2_hdr_size; |
| 1165 | |
| 1166 | dest_addr = ol_tx_dest_addr_find(pdev, msdu); |
| 1167 | if (NULL == dest_addr) |
Anurag Chouhan | c73697b | 2016-02-21 15:05:43 +0530 | [diff] [blame] | 1168 | return QDF_NBUF_TX_EXT_TID_INVALID; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1169 | |
| 1170 | is_mcast = IEEE80211_IS_MULTICAST(dest_addr); |
| 1171 | is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE; |
| 1172 | if (is_mgmt) { |
| 1173 | return (is_mcast) ? |
| 1174 | OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT : |
| 1175 | HTT_TX_EXT_TID_MGMT; |
| 1176 | } |
| 1177 | if (is_mcast) |
| 1178 | return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST; |
| 1179 | |
| 1180 | if (pdev->frame_format == wlan_frm_fmt_802_3) { |
| 1181 | struct ethernet_hdr_t *enet_hdr; |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1182 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1183 | enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1184 | l2_hdr_size = sizeof(struct ethernet_hdr_t); |
| 1185 | ethertype = |
| 1186 | (enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1]; |
| 1187 | if (!IS_ETHERTYPE(ethertype)) { |
| 1188 | struct llc_snap_hdr_t *llc_hdr; |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1189 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1190 | llc_hdr = (struct llc_snap_hdr_t *) |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1191 | (qdf_nbuf_data(msdu) + l2_hdr_size); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1192 | l2_hdr_size += sizeof(struct llc_snap_hdr_t); |
| 1193 | ethertype = |
| 1194 | (llc_hdr->ethertype[0] << 8) | llc_hdr-> |
| 1195 | ethertype[1]; |
| 1196 | } |
| 1197 | } else { |
| 1198 | struct llc_snap_hdr_t *llc_hdr; |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1199 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1200 | l2_hdr_size = sizeof(struct ieee80211_frame); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1201 | llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1202 | + l2_hdr_size); |
| 1203 | l2_hdr_size += sizeof(struct llc_snap_hdr_t); |
| 1204 | ethertype = |
| 1205 | (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1]; |
| 1206 | } |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1207 | l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1208 | if (ETHERTYPE_IPV4 == ethertype) { |
| 1209 | return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7; |
| 1210 | } else if (ETHERTYPE_IPV6 == ethertype) { |
| 1211 | return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) & |
| 1212 | 0x7; |
| 1213 | } else { |
Anurag Chouhan | c73697b | 2016-02-21 15:05:43 +0530 | [diff] [blame] | 1214 | return QDF_NBUF_TX_EXT_TID_INVALID; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1215 | } |
| 1216 | } |
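
/*
 * Worked example (illustrative): an IPv4 packet with TOS = 0xB8 (DSCP 46,
 * expedited forwarding) yields (0xB8 >> 5) & 0x7 = 5, so it is bucketed
 * under TID 5; the top three TOS bits (the IP precedence) select the TID.
 */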
| 1217 | #endif |
| 1218 | |
| 1219 | static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id) |
| 1220 | { |
| 1221 | #ifdef QCA_COMPUTE_TX_DELAY_PER_TID |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 1222 | struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1223 | uint8_t tid; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1224 | qdf_nbuf_t msdu = tx_desc->netbuf; |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1225 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1226 | tid = qdf_nbuf_get_tid(msdu); |
Anurag Chouhan | c73697b | 2016-02-21 15:05:43 +0530 | [diff] [blame] | 1227 | if (tid == QDF_NBUF_TX_EXT_TID_INVALID) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1228 | tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc); |
Anurag Chouhan | c73697b | 2016-02-21 15:05:43 +0530 | [diff] [blame] | 1229 | if (tid == QDF_NBUF_TX_EXT_TID_INVALID) { |
Yun Park | f967715 | 2017-04-08 13:29:34 -0700 | [diff] [blame] | 1230 | /* |
| 1231 | * TID could not be determined |
| 1232 | * (this is not an IP frame?) |
| 1233 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1234 | return -EINVAL; |
| 1235 | } |
| 1236 | } |
| 1237 | return tid; |
| 1238 | #else |
| 1239 | return 0; |
| 1240 | #endif |
| 1241 | } |
| 1242 | |
| 1243 | static inline int |
| 1244 | ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks) |
| 1245 | { |
| 1246 | int bin; |
| 1247 | /* |
| 1248 | * For speed, multiply and shift to approximate a divide. This causes |
| 1249 | * a small error, but the approximation error should be much less |
| 1250 | * than the other uncertainties in the tx delay computation. |
| 1251 | */ |
| 1252 | bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >> |
| 1253 | pdev->tx_delay.hist_internal_bin_width_shift; |
| 1254 | if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS) |
| 1255 | bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1; |
| 1256 | |
| 1257 | return bin; |
| 1258 | } |
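
/*
 * Illustrative sketch (the multiplier and shift are assumed values used
 * only for this example, not the actual configuration): with
 * hist_internal_bin_width_mult = 205 and hist_internal_bin_width_shift = 11,
 * (delay_ticks * 205) >> 11 approximates delay_ticks / 10, since
 * 205 / 2048 ~= 0.1001; e.g. delay_ticks = 100 gives (100 * 205) >> 11 = 10.
 */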
| 1259 | |
| 1260 | static void |
| 1261 | ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev, |
| 1262 | enum htt_tx_status status, |
| 1263 | uint16_t *desc_ids, int num_msdus) |
| 1264 | { |
| 1265 | int i, index, cat; |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 1266 | uint32_t now_ticks = qdf_system_ticks(); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1267 | uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks; |
| 1268 | uint32_t avg_time_ticks; |
| 1269 | struct ol_tx_delay_data *data; |
| 1270 | |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 1271 | qdf_assert(num_msdus > 0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1272 | |
| 1273 | /* |
| 1274 | 	 * Keep running counters of total and lost packets per category;
| 1275 | 	 * they are reset in ol_tx_delay(), the function used to fetch the stats.
| 1276 | */ |
| 1277 | |
| 1278 | cat = ol_tx_delay_category(pdev, desc_ids[0]); |
| 1279 | if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES) |
| 1280 | return; |
| 1281 | |
| 1282 | pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus; |
| 1283 | if (status != htt_tx_status_ok) { |
| 1284 | for (i = 0; i < num_msdus; i++) { |
| 1285 | cat = ol_tx_delay_category(pdev, desc_ids[i]); |
| 1286 | if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES) |
| 1287 | return; |
| 1288 | pdev->packet_loss_count[cat]++; |
| 1289 | } |
| 1290 | return; |
| 1291 | } |
| 1292 | |
| 1293 | 	/* the ping-pong index may be switched below, so serialize with readers */
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1294 | qdf_spin_lock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1295 | index = pdev->tx_delay.cats[cat].in_progress_idx; |
| 1296 | |
| 1297 | data = &pdev->tx_delay.cats[cat].copies[index]; |
| 1298 | |
| 1299 | if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) { |
| 1300 | tx_delay_transmit_ticks = |
| 1301 | now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks; |
| 1302 | /* |
| 1303 | * We'd like to account for the number of MSDUs that were |
| 1304 | * transmitted together, but we don't know this. All we know |
| 1305 | * is the number of MSDUs that were acked together. |
| 1306 | * Since the frame error rate is small, this is nearly the same |
| 1307 | * as the number of frames transmitted together. |
| 1308 | */ |
| 1309 | data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks; |
| 1310 | data->avgs.transmit_num += num_msdus; |
| 1311 | } |
| 1312 | pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks; |
| 1313 | |
| 1314 | for (i = 0; i < num_msdus; i++) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1315 | int bin; |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 1316 | uint16_t id = desc_ids[i]; |
| 1317 | struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1318 | |
| 1319 | tx_delay_queue_ticks = |
| 1320 | now_ticks - tx_desc->entry_timestamp_ticks; |
| 1321 | |
| 1322 | data->avgs.queue_sum_ticks += tx_delay_queue_ticks; |
| 1323 | data->avgs.queue_num++; |
| 1324 | bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks); |
| 1325 | data->hist_bins_queue[bin]++; |
| 1326 | } |
| 1327 | |
| 1328 | /* check if it's time to start a new average */ |
| 1329 | avg_time_ticks = |
| 1330 | now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks; |
| 1331 | if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) { |
| 1332 | pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks; |
| 1333 | index = 1 - index; |
| 1334 | pdev->tx_delay.cats[cat].in_progress_idx = index; |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1335 | qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index], |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1336 | sizeof(pdev->tx_delay.cats[cat].copies[index])); |
| 1337 | } |
| 1338 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1339 | qdf_spin_unlock_bh(&pdev->tx_delay.mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1340 | } |
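
/*
 * Note: each delay category keeps two copies of its stats (ping-pong
 * buffers); this function accumulates into copies[in_progress_idx] while
 * ol_tx_delay() and ol_tx_delay_hist() report from the other copy, and
 * the roles swap once avg_period_ticks have elapsed.
 */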
| 1341 | |
| 1342 | #endif /* QCA_COMPUTE_TX_DELAY */ |
Himanshu Agarwal | f65bd4c | 2016-12-05 17:21:12 +0530 | [diff] [blame] | 1343 | |
| 1344 | /** |
| 1345 |  * ol_register_packetdump_callback() - registers the
| 1346 |  * tx data packet, tx mgmt. packet and rx data packet
| 1347 |  * dump callback handlers
| 1348 | * |
| 1349 | * @ol_tx_packetdump_cb: tx packetdump cb |
| 1350 | * @ol_rx_packetdump_cb: rx packetdump cb |
| 1351 | * |
| 1352 | * This function is used to register tx data pkt, tx mgmt. |
| 1353 | * pkt and rx data pkt dump callback |
| 1354 | * |
| 1355 | * Return: None |
| 1356 | * |
| 1357 | */ |
| 1358 | void ol_register_packetdump_callback(tp_ol_packetdump_cb ol_tx_packetdump_cb, |
| 1359 | tp_ol_packetdump_cb ol_rx_packetdump_cb) |
| 1360 | { |
| 1361 | ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 1362 | |
| 1363 | if (!pdev) { |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 1364 | 		ol_txrx_err("%s: pdev is NULL", __func__);
| 1366 | return; |
| 1367 | } |
| 1368 | |
| 1369 | pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb; |
| 1370 | pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb; |
| 1371 | } |
| 1372 | |
| 1373 | /** |
| 1374 |  * ol_deregister_packetdump_callback() - deregisters the
| 1375 |  * tx data packet, tx mgmt. packet and rx data packet
| 1376 |  * dump callback handlers
| 1377 |  *
| 1378 |  * This function is used to deregister the tx data pkt,
| 1379 |  * tx mgmt. pkt and rx data pkt dump callbacks
| 1380 | * |
| 1381 | * Return: None |
| 1382 | * |
| 1383 | */ |
| 1384 | void ol_deregister_packetdump_callback(void) |
| 1385 | { |
| 1386 | ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 1387 | |
| 1388 | if (!pdev) { |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 1389 | 		ol_txrx_err("%s: pdev is NULL", __func__);
| 1391 | return; |
| 1392 | } |
| 1393 | |
| 1394 | pdev->ol_tx_packetdump_cb = NULL; |
| 1395 | pdev->ol_rx_packetdump_cb = NULL; |
| 1396 | } |
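
/*
 * Usage sketch (hypothetical consumer, not part of this file): a packet
 * capture / logging module would typically call
 * ol_register_packetdump_callback(tx_cb, rx_cb) at init, with tx_cb and
 * rx_cb matching the tp_ol_packetdump_cb prototype, and
 * ol_deregister_packetdump_callback() at teardown.
 */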
Yu Wang | ceb357b | 2017-06-01 12:04:18 +0800 | [diff] [blame] | 1397 | |
| 1398 | #ifdef WLAN_FEATURE_TSF_PLUS |
| 1399 | void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb) |
| 1400 | { |
| 1401 | ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 1402 | |
| 1403 | if (!pdev) { |
| 1404 | ol_txrx_err("%s: pdev is NULL", __func__); |
| 1405 | return; |
| 1406 | } |
| 1407 | pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb; |
| 1408 | } |
| 1409 | |
| 1410 | void ol_deregister_timestamp_callback(void) |
| 1411 | { |
| 1412 | ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 1413 | |
| 1414 | if (!pdev) { |
| 1415 | ol_txrx_err("%s: pdev is NULL", __func__); |
| 1416 | return; |
| 1417 | } |
| 1418 | pdev->ol_tx_timestamp_cb = NULL; |
| 1419 | } |
| 1420 | #endif |