/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>                /* HIF_DEVICE */
#include <htc_api.h>            /* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>          /* htc_endpoint */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
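/*
 * Note: this macro does a bare "return msdu;" if tx descriptor allocation
 * fails, so it may only be used inside functions that return the list of
 * unaccepted MSDUs (qdf_nbuf_t).
 */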
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do { \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
		if (qdf_unlikely(!tx_desc)) { \
			TXRX_STATS_MSDU_LIST_INCR( \
				pdev, tx.dropped.host_reject, msdu); \
			return msdu; /* the list of unaccepted MSDUs */ \
		} \
	} while (0)

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
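		/*
		 * Pre-allocate one segment element per TSO segment; if any
		 * allocation fails, free every element allocated so far and
		 * reject the msdu.
		 */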
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
						free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: network buffer to transmit
 *
 * Return: NULL on success; the skb if it was not accepted for transmission
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t ret;
	QDF_STATUS status;

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:pdev is null", __func__);
		return skb;
	}
	if (qdf_unlikely(!qdf_ctx)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s:qdf_ctx is null", __func__);
		return skb;
	}

	status = qdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
	if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: nbuf map failed", __func__);
		return skb;
	}

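	/*
	 * For IPv4 frames carrying a partial checksum, let the target
	 * complete the checksum when TCP/UDP checksum offload is enabled.
	 */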
	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: Failed to tx", __func__);
		qdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: virtual device handle
 * @skb: network buffer to transmit
 *
 * Return: NULL on success; the skb if it was not accepted for transmission
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
				     qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
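/**
 * ol_tx_ll() - transmit a list of tx frames on an LL vdev
 * @vdev: virtual device handle
 * @msdu_list: null-terminated list of MSDUs to transmit
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */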
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

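			/*
			 * Update the nbuf control block's paddr to the first
			 * fragment of the current TSO segment before the
			 * descriptor prep and send below.
			 */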
			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - allocate and prepare a tx descriptor
 *
 * Allocate and prepare a tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: handle to msdu_info
 *
 * Return: pointer to the prepared tx descriptor, or NULL on failure
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags is expected to be 2 at most */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* !defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(msdu, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* !defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */
	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out
 *
 * Return: NULL on success; pointer to the remaining nbuf list when a send fails
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
							pkt_download_len, ep_id,
							&msdu_info);

			if (qdf_likely(tx_desc)) {
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
						       1, ep_id))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

		if (qdf_likely(tx_desc)) {
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
					       ep_id))) {
				/*
				 * The packet could not be sent.
				 * Free the descriptor, return the packet to
				 * the caller.
				 */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper for ol_tx_ll
 * @vdev: virtual device handle
 * @msdu_list: list of MSDUs to transmit
 *
 * Dispatch the MSDU list to ol_tx_ll_fast() when the HIF fast path is
 * enabled, and to ol_tx_ll() otherwise.
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

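/*
 * When draining a vdev's pause queue, leave this margin of tx descriptors
 * unallocated for frames from other vdevs, and retry a still-backlogged
 * queue after this many milliseconds.
 */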
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

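/**
 * ol_tx_vdev_pause_queue_append() - append frames to a vdev's pause queue
 * @vdev: virtual device to queue the frames on
 * @msdu_list: list of frames to append
 * @start_timer: whether to (re)start the pause-queue flush timer
 *
 * Frames beyond the queue's max depth are not queued.
 *
 * Return: the list of frames that did not fit in the pause queue
 */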
static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
			(uint8_t *)(qdf_nbuf_data(msdu_list)),
			sizeof(qdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}


/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle-off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* tracks how many frames have been sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/*
	 * Potential improvement: download several frames from the same vdev
	 * at a time, since it is more likely that those frames could be
	 * aggregated together; remember which vdev was serviced last, so
	 * the next call to this function can resume the round-robin
	 * traversing where the current invocation left off.
	 */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

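/**
 * ol_tx_vdev_ll_pause_queue_send() - send frames queued on a vdev's
 * ll_pause tx queue, unless tx is throttled off
 * @context: opaque pointer to the vdev (struct ol_txrx_vdev_t *)
 */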
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & ol_tx_spec_no_aggr)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & ol_tx_spec_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

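/**
 * ol_tx_non_std_ll() - send a list of non-standard tx frames on an LL vdev
 * @vdev: virtual device handle
 * @tx_spec: what non-standard handling to apply (raw, no-free, no-encrypt, ...)
 * @msdu_list: null-terminated list of MSDUs to transmit
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */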
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1015 | qdf_nbuf_t |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1016 | ol_tx_non_std_ll(ol_txrx_vdev_handle vdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1017 | enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1018 | { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1019 | qdf_nbuf_t msdu = msdu_list; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1020 | htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev; |
| 1021 | struct ol_txrx_msdu_info_t msdu_info; |
| 1022 | |
| 1023 | msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type; |
| 1024 | msdu_info.htt.action.tx_comp_req = 0; |
| 1025 | |
| 1026 | /* |
| 1027 | * The msdu_list variable could be used instead of the msdu var, |
| 1028 | * but just to clarify which operations are done on a single MSDU |
| 1029 | * vs. a list of MSDUs, use a distinct variable for single MSDUs |
| 1030 | * within the list. |
| 1031 | */ |
| 1032 | while (msdu) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1033 | qdf_nbuf_t next; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1034 | struct ol_tx_desc_t *tx_desc; |
| 1035 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1036 | msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1037 | msdu_info.peer = NULL; |
| 1038 | msdu_info.tso_info.is_tso = 0; |
| 1039 | |
| 1040 | ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info); |
| 1041 | |
| 1042 | /* |
| 1043 | * The netbuf may get linked into a different list inside the |
| 1044 | * ol_tx_send function, so store the next pointer before the |
| 1045 | * tx_send call. |
| 1046 | */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1047 | next = qdf_nbuf_next(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1048 | |
| 1049 | if (tx_spec != ol_tx_spec_std) { |
| 1050 | if (tx_spec & ol_tx_spec_no_free) { |
| 1051 | tx_desc->pkt_type = ol_tx_frm_no_free; |
| 1052 | } else if (tx_spec & ol_tx_spec_tso) { |
| 1053 | tx_desc->pkt_type = ol_tx_frm_tso; |
| 1054 | } else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) { |
| 1055 | uint8_t sub_type = |
| 1056 | ol_txrx_tx_raw_subtype(tx_spec); |
| 1057 | htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc, |
| 1058 | htt_pkt_type_native_wifi, |
| 1059 | sub_type); |
| 1060 | } else if (ol_txrx_tx_is_raw(tx_spec)) { |
| 1061 | /* different types of raw frames */ |
| 1062 | uint8_t sub_type = |
| 1063 | ol_txrx_tx_raw_subtype(tx_spec); |
| 1064 | htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc, |
| 1065 | htt_pkt_type_raw, sub_type); |
| 1066 | } |
| 1067 | } |
| 1068 | /* |
| 1069 | * If debug display is enabled, show the meta-data being |
| 1070 | * downloaded to the target via the HTT tx descriptor. |
| 1071 | */ |
| 1072 | htt_tx_desc_display(tx_desc->htt_tx_desc); |
| 1073 | ol_tx_send(vdev->pdev, tx_desc, msdu); |
| 1074 | msdu = next; |
| 1075 | } |
| 1076 | return NULL; /* all MSDUs were accepted */ |
| 1077 | } |
| 1078 | |
| 1079 | #ifdef QCA_SUPPORT_SW_TXRX_ENCAP |
| 1080 | #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \ |
| 1081 | do { \ |
| 1082 | if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \ |
Anurag Chouhan | 8e0ccd3 | 2016-02-19 15:30:20 +0530 | [diff] [blame] | 1083 | qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1084 | ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \ |
| 1085 | if (tx_msdu_info.peer) { \ |
| 1086 | /* remove the peer reference added above */ \ |
| 1087 | ol_txrx_peer_unref_delete(tx_msdu_info.peer); \ |
| 1088 | } \ |
| 1089 | goto MSDU_LOOP_BOTTOM; \ |
| 1090 | } \ |
| 1091 | } while (0) |
| 1092 | #else |
| 1093 | #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */ |
| 1094 | #endif |
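
/*
 * Note: OL_TX_ENCAP_WRAPPER can only be used inside a per-MSDU loop that
 * declares an MSDU_LOOP_BOTTOM label and has pdev, tx_desc, and
 * tx_msdu_info in scope; on encap failure it returns the tx descriptor
 * to the pool, drops any peer reference taken earlier, and jumps past
 * the send step to the bottom of the loop.
 */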
| 1095 | |
| 1096 | /* tx filtering is handled within the target FW */ |
| 1097 | #define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */ |
| 1098 | |
#define OCB_HEADER_VERSION 1

/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 * and extract it if present
 *
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Pointer to a buffer that receives a copy of the TX control
 *	header if one is present and valid (may be NULL)
 *
 * Return: true if no TX control header is present or a valid one was
 *	parsed and stripped; false if the header version is unrecognized
 */
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1106 | bool parse_ocb_tx_header(qdf_nbuf_t msdu, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1107 | struct ocb_tx_ctrl_hdr_t *tx_ctrl) |
| 1108 | { |
| 1109 | struct ether_header *eth_hdr_p; |
| 1110 | struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr; |
| 1111 | |
| 1112 | /* Check if TX control header is present */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1113 | eth_hdr_p = (struct ether_header *) qdf_nbuf_data(msdu); |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1114 | if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX)) |
		/* TX control header is not present. Nothing to do. */
| 1116 | return true; |
| 1117 | |
| 1118 | /* Remove the ethernet header */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1119 | qdf_nbuf_pull_head(msdu, sizeof(struct ether_header)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1120 | |
| 1121 | /* Parse the TX control header */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1122 | tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) qdf_nbuf_data(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1123 | |
| 1124 | if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) { |
| 1125 | if (tx_ctrl) |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1126 | qdf_mem_copy(tx_ctrl, tx_ctrl_hdr, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1127 | sizeof(*tx_ctrl_hdr)); |
| 1128 | } else { |
| 1129 | /* The TX control header is invalid. */ |
| 1130 | return false; |
| 1131 | } |
| 1132 | |
| 1133 | /* Remove the TX control header */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1134 | qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1135 | return true; |
| 1136 | } |
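
/*
 * A sketch of the expected buffer layout when a control header is present
 * (field details are illustrative; tx_ctrl_hdr->length gives the actual
 * header size):
 *
 *   [ether_header with ether_type == ETHERTYPE_OCB_TX]
 *   [struct ocb_tx_ctrl_hdr_t: version, length, per-packet tx parameters]
 *   [frame payload]
 *
 * parse_ocb_tx_header() strips the first two pieces, leaving the payload
 * at the head of the nbuf. Note that if the version check fails, the
 * ethernet header has already been removed, so the caller should drop
 * the frame rather than try to transmit it.
 */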
| 1137 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1138 | qdf_nbuf_t |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1139 | ol_tx_non_std(ol_txrx_vdev_handle vdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1140 | enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1141 | { |
| 1142 | return ol_tx_non_std_ll(vdev, tx_spec, msdu_list); |
| 1143 | } |
| 1144 | |
| 1145 | void |
| 1146 | ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev, |
| 1147 | ol_txrx_data_tx_cb callback, void *ctxt) |
| 1148 | { |
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
| 1152 | } |
| 1153 | |
/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 * notifications for management frames.
 *
 * @pdev: the data physical device object
 * @type: the type of mgmt frame the callback is used for
 * @download_cb: the callback for notification of delivery to the target
 * @ota_ack_cb: the callback for notification of delivery to the peer
 * @ctxt: context to use with the callback
 *
 * When the txrx SW receives notification from the target that a tx frame
 * has been delivered to its recipient, it checks whether the tx frame
 * is a management frame. If so, the txrx SW checks the management
 * frame type that was specified when the frame was submitted for
 * transmission. If a callback function is registered for that type of
 * management frame, the txrx code invokes the callback to inform the
 * management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1174 | void |
| 1175 | ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev, |
| 1176 | uint8_t type, |
| 1177 | ol_txrx_mgmt_tx_cb download_cb, |
| 1178 | ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) |
| 1179 | { |
| 1180 | TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES); |
| 1181 | pdev->tx_mgmt.callbacks[type].download_cb = download_cb; |
| 1182 | pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb; |
| 1183 | pdev->tx_mgmt.callbacks[type].ctxt = ctxt; |
| 1184 | } |
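
/*
 * Example (hypothetical caller, for illustration only): a control-path
 * module could register delivery callbacks for one of its frame types as
 *
 *	ol_txrx_mgmt_tx_cb_set(pdev, MY_MGMT_TYPE_IDX,
 *			       my_download_cb, my_ota_ack_cb, my_ctxt);
 *
 * where MY_MGMT_TYPE_IDX is a driver-defined index below
 * OL_TXRX_MGMT_NUM_TYPES, and the same index is later passed as the
 * type argument to ol_txrx_mgmt_send_ext().
 */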
| 1185 | |
| 1186 | #if defined(HELIUMPLUS_PADDR64) |
| 1187 | void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc) |
| 1188 | { |
| 1189 | uint32_t *frag_ptr_i_p; |
| 1190 | int i; |
| 1191 | |
	qdf_print("OL TX Descriptor 0x%p msdu_id %d",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
		  __func__, __LINE__,
		  tx_desc->htt_frag_desc, tx_desc->htt_frag_desc_paddr);
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1198 | |
	/*
	 * It looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	 * is already dereferenceable (=> in virtual address space).
	 */
| 1201 | frag_ptr_i_p = tx_desc->htt_frag_desc; |
| 1202 | |
| 1203 | /* Dump 6 words of TSO flags */ |
| 1204 | print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ", |
| 1205 | DUMP_PREFIX_NONE, 8, 4, |
| 1206 | frag_ptr_i_p, 24, true); |
| 1207 | |
| 1208 | frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */ |
| 1209 | |
| 1210 | i = 0; |
| 1211 | while (*frag_ptr_i_p) { |
| 1212 | print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ", |
| 1213 | DUMP_PREFIX_NONE, 8, 4, |
| 1214 | frag_ptr_i_p, 8, true); |
| 1215 | i++; |
| 1216 | if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */ |
| 1217 | break; |
| 1218 | else /* jump to next pointer - skip length */ |
| 1219 | frag_ptr_i_p += 2; |
| 1220 | } |
| 1222 | } |
| 1223 | #endif /* HELIUMPLUS_PADDR64 */ |
| 1224 | |
/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 *
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame to transmit
 * @type: the type of management frame (determines which callback to use)
 * @use_6mbps: specify whether the management frame to transmit should
 *	use 6 Mbps rather than the 1 Mbps min rate (for 5 GHz band or P2P)
 * @chanfreq: channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *	-EINVAL - the frame was not accepted
 */
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1243 | int |
Dhanashri Atre | 12a0839 | 2016-02-17 13:10:34 -0800 | [diff] [blame] | 1244 | ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1245 | qdf_nbuf_t tx_mgmt_frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1246 | uint8_t type, uint8_t use_6mbps, uint16_t chanfreq) |
| 1247 | { |
| 1248 | struct ol_txrx_pdev_t *pdev = vdev->pdev; |
| 1249 | struct ol_tx_desc_t *tx_desc; |
| 1250 | struct ol_txrx_msdu_info_t tx_msdu_info; |
| 1251 | |
| 1252 | tx_msdu_info.tso_info.is_tso = 0; |
| 1253 | |
| 1254 | tx_msdu_info.htt.action.use_6mbps = use_6mbps; |
| 1255 | tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT; |
| 1256 | tx_msdu_info.htt.info.vdev_id = vdev->vdev_id; |
| 1257 | tx_msdu_info.htt.action.do_tx_complete = |
| 1258 | pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0; |
| 1259 | |
| 1260 | /* |
| 1261 | * FIX THIS: l2_hdr_type should only specify L2 header type |
| 1262 | * The Peregrine/Rome HTT layer provides the FW with a "pkt type" |
| 1263 | * that is a combination of L2 header type and 802.11 frame type. |
| 1264 | * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt". |
| 1265 | * But if the 802.11 frame type is "data", then the HTT pkt type is |
| 1266 | * the L2 header type (more or less): 802.3 vs. Native WiFi |
| 1267 | * (basic 802.11). |
| 1268 | * (Or the header type can be "raw", which is any version of the 802.11 |
| 1269 | * header, and also implies that some of the offloaded tx data |
| 1270 | * processing steps may not apply.) |
| 1271 | * For efficiency, the Peregrine/Rome HTT uses the msdu_info's |
| 1272 | * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW |
| 1273 | * needs to overload the l2_hdr_type to indicate whether the frame is |
| 1274 | * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header. |
| 1275 | * To fix this, the msdu_info's l2_hdr_type should be left specifying |
| 1276 | * just the L2 header type. For mgmt frames, there should be a |
| 1277 | * separate function to patch the HTT pkt type to store a "mgmt" value |
| 1278 | * rather than the L2 header type. Then the HTT pkt type can be |
| 1279 | * programmed efficiently for data frames, and the msdu_info's |
| 1280 | * l2_hdr_type field won't be confusingly overloaded to hold the 802.11 |
| 1281 | * frame type rather than the L2 header type. |
| 1282 | */ |
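	/*
	 * A minimal sketch of the separation suggested above (hypothetical
	 * helper, not an existing HTT API):
	 *
	 *	void htt_tx_desc_mark_mgmt(htt_pdev_handle pdev,
	 *				   void *htt_tx_desc)
	 *	{
	 *		// overwrite only the pkt-type field with "mgmt",
	 *		// so msdu_info's l2_hdr_type can keep describing
	 *		// just the L2 header (802.3 vs. native WiFi vs. raw)
	 *	}
	 */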
| 1283 | /* |
| 1284 | * FIX THIS: remove duplication of htt_frm_type_mgmt and |
| 1285 | * htt_pkt_type_mgmt |
	 * The htt module expects an "enum htt_pkt_type" value.
	 * The htt_dxe module expects an "enum htt_frm_type" value.
| 1288 | * This needs to be cleaned up, so both versions of htt use a |
| 1289 | * consistent method of specifying the frame type. |
| 1290 | */ |
| 1291 | #ifdef QCA_SUPPORT_INTEGRATED_SOC |
| 1292 | /* tx mgmt frames always come with a 802.11 header */ |
| 1293 | tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi; |
| 1294 | tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt; |
| 1295 | #else |
| 1296 | tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt; |
| 1297 | tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt; |
| 1298 | #endif |
| 1299 | |
| 1300 | tx_msdu_info.peer = NULL; |
| 1301 | |
	/* check the DMA mapping result rather than sending a frame the
	 * target cannot address (qdf_nbuf_map_single() reports this via
	 * its QDF_STATUS return value) */
	if (qdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm,
				QDF_DMA_TO_DEVICE) != QDF_STATUS_SUCCESS)
		return -EINVAL; /* can't accept the tx mgmt frame */
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1303 | /* For LL tx_comp_req is not used so initialized to 0 */ |
| 1304 | tx_msdu_info.htt.action.tx_comp_req = 0; |
| 1305 | tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info); |
| 1306 | /* FIX THIS - |
| 1307 | * The FW currently has trouble using the host's fragments table |
| 1308 | * for management frames. Until this is fixed, rather than |
| 1309 | * specifying the fragment table to the FW, specify just the |
| 1310 | * address of the initial fragment. |
| 1311 | */ |
| 1312 | #if defined(HELIUMPLUS_PADDR64) |
| 1313 | /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll", |
| 1314 | tx_desc); */ |
| 1315 | #endif /* defined(HELIUMPLUS_PADDR64) */ |
	if (!tx_desc) {
		qdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
				      QDF_DMA_TO_DEVICE);
		return -EINVAL; /* can't accept the tx mgmt frame */
	}

	/*
	 * Following the call to ol_tx_desc_ll, frag 0 is the
	 * HTT tx HW descriptor, and the frame payload is in
	 * frag 1.
	 */
	htt_tx_desc_frags_table_set(
		pdev->htt_pdev,
		tx_desc->htt_tx_desc,
		qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
		0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
	dump_frag_desc("after htt_tx_desc_frags_table_set", tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG) */
| 1338 | TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm); |
| 1339 | TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES); |
| 1340 | tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE; |
| 1341 | |
| 1342 | htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1343 | QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) = |
| 1344 | QDF_NBUF_TX_PKT_MGMT_TRACK; |
| 1345 | ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1346 | htt_pkt_type_mgmt); |
| 1347 | |
| 1348 | return 0; /* accepted the tx mgmt frame */ |
| 1349 | } |
| 1350 | |
| 1351 | void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt) |
| 1352 | { |
| 1353 | htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt); |
| 1354 | } |
| 1355 | |
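/**
 * ol_tx_reinject() - send an MSDU that was previously submitted, this
 *	time directed at a specific peer
 * @vdev: virtual device to send the frame from
 * @msdu: frame to re-inject
 * @peer_id: target peer ID to set in the HTT tx descriptor
 *
 * Return: NULL, matching the convention that accepted MSDUs are consumed
 */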
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1356 | qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev, |
| 1357 | qdf_nbuf_t msdu, uint16_t peer_id) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1358 | { |
| 1359 | struct ol_tx_desc_t *tx_desc; |
| 1360 | struct ol_txrx_msdu_info_t msdu_info; |
| 1361 | |
| 1362 | msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type; |
| 1363 | msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID; |
| 1364 | msdu_info.peer = NULL; |
| 1365 | msdu_info.htt.action.tx_comp_req = 0; |
| 1366 | msdu_info.tso_info.is_tso = 0; |
| 1367 | |
| 1368 | ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info); |
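	/*
	 * Mark the descriptor as postponed; judging by the flag name, this
	 * tells the target that the frame was downloaded once before and
	 * is now being re-sent.
	 */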
| 1369 | HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true); |
| 1370 | |
| 1371 | htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id); |
| 1372 | |
| 1373 | ol_tx_send(vdev->pdev, tx_desc, msdu); |
| 1374 | |
| 1375 | return NULL; |
| 1376 | } |
| 1377 | |
| 1378 | #if defined(FEATURE_TSO) |
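/*
 * The TSO segment pool is a singly-linked freelist of qdf_tso_seg_elem_t
 * entries guarded by tso_mutex; elements are presumably popped by the tx
 * path when a jumbo frame is segmented and pushed back on tx completion.
 */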
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	uint32_t i;
	struct qdf_tso_seg_elem_t *c_element;

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			/* allocation failed; stop here and leave a
			 * shorter, still NULL-terminated freelist */
			break;
		}
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
	}
	/*
	 * Terminate the list outside the loop so the last element is
	 * handled even when the loop body never runs (num_seg == 1).
	 */
	if (c_element)
		c_element->next = NULL;
	pdev->tso_seg_pool.pool_size = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}
| 1395 | |
| 1396 | void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev) |
| 1397 | { |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 1398 | int i; |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 1399 | struct qdf_tso_seg_elem_t *c_element; |
| 1400 | struct qdf_tso_seg_elem_t *temp; |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 1401 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1402 | qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex); |
	c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		/* guard against a freelist that was truncated by a
		 * failed allocation in ol_tso_seg_list_init() */
		if (!c_element)
			break;
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1410 | } |
| 1411 | |
| 1412 | pdev->tso_seg_pool.freelist = NULL; |
| 1413 | pdev->tso_seg_pool.num_free = 0; |
| 1414 | pdev->tso_seg_pool.pool_size = 0; |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 1415 | qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex); |
| 1416 | qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1417 | } |
| 1418 | #endif /* FEATURE_TSO */ |