/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_classify.h>     /* ol_tx_classify, ol_tx_classify_mgmt */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */
#include <ol_tx_sched.h>        /* ol_tx_sched */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>                /* HIF_DEVICE */
#include <htc_api.h>            /* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>          /* htc_endpoint */
#include <cdp_txrx_peer_ops.h>

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
                 unsigned int transfer_id, uint32_t download_len);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
        do { \
                struct ol_txrx_pdev_t *pdev = vdev->pdev; \
                (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
                tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
                if (qdf_unlikely(!tx_desc)) { \
                        TXRX_STATS_MSDU_LIST_INCR( \
                                pdev, tx.dropped.host_reject, msdu); \
                        return msdu; /* the list of unaccepted MSDUs */ \
                } \
        } while (0)
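
/*
 * Usage sketch (comment only, not compiled): because ol_tx_prepare_ll()
 * expands to a statement that can execute "return msdu", it may only be
 * invoked from a function returning qdf_nbuf_t whose contract is to hand
 * back the list of unaccepted MSDUs, as the LL send paths below do.
 * "my_tx_path" is a hypothetical name used only for illustration:
 *
 *      qdf_nbuf_t my_tx_path(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu)
 *      {
 *              struct ol_tx_desc_t *tx_desc;
 *              struct ol_txrx_msdu_info_t msdu_info;
 *
 *              // (msdu_info setup elided)
 *              ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
 *              // if descriptor allocation failed, the macro has already
 *              // returned msdu to this function's caller
 *              ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
 *              return NULL;
 *      }
 */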

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
        qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
        msdu_info->tso_info.curr_seg = NULL;
        if (qdf_nbuf_is_tso(msdu)) {
                int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
                msdu_info->tso_info.tso_seg_list = NULL;
                msdu_info->tso_info.num_segs = num_seg;
                while (num_seg) {
                        struct qdf_tso_seg_elem_t *tso_seg =
                                ol_tso_alloc_segment(vdev->pdev);
                        if (tso_seg) {
                                tso_seg->next =
                                        msdu_info->tso_info.tso_seg_list;
                                msdu_info->tso_info.tso_seg_list
                                        = tso_seg;
                                num_seg--;
                        } else {
                                struct qdf_tso_seg_elem_t *next_seg;
                                struct qdf_tso_seg_elem_t *free_seg =
                                        msdu_info->tso_info.tso_seg_list;
                                qdf_print("TSO seg alloc failed!\n");
                                while (free_seg) {
                                        next_seg = free_seg->next;
                                        ol_tso_free_segment(vdev->pdev,
                                                free_seg);
                                        free_seg = next_seg;
                                }
                                return 1;
                        }
                }
                qdf_nbuf_get_tso_info(vdev->pdev->osdev,
                        msdu, &(msdu_info->tso_info));
                msdu_info->tso_info.curr_seg =
                        msdu_info->tso_info.tso_seg_list;
                num_seg = msdu_info->tso_info.num_segs;
        } else {
                msdu_info->tso_info.is_tso = 0;
                msdu_info->tso_info.num_segs = 1;
        }
        return 0;
}
#endif
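
/*
 * Result-layout sketch (comment only): after a successful
 * ol_tx_prepare_tso() call on an nbuf that qdf_nbuf_is_tso() reports as
 * TSO, msdu_info->tso_info holds a singly linked list of num_segs
 * segment elements, with curr_seg pointing at the head:
 *
 *      struct ol_txrx_msdu_info_t msdu_info;
 *      struct qdf_tso_seg_elem_t *seg;
 *
 *      if (ol_tx_prepare_tso(vdev, msdu, &msdu_info) == 0) {
 *              for (seg = msdu_info.tso_info.curr_seg; seg;
 *                   seg = seg->next)
 *                      ;       // one tx descriptor is built per segment
 *      }
 *
 * On failure (return 1), every element already taken from the pdev
 * segment pool has been returned via ol_tso_free_segment().
 */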

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: NULL on success; the skb if the frame could not be sent
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
        struct ol_txrx_pdev_t *pdev;
        qdf_nbuf_t ret;

        if (qdf_unlikely(!vdev)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                          "%s:vdev is null", __func__);
                return skb;
        }

        pdev = vdev->pdev;

        if (qdf_unlikely(!pdev)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                          "%s:pdev is null", __func__);
                return skb;
        }

        if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
                && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
                && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
                qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

        /* Terminate the (single-element) list of tx frames */
        qdf_nbuf_set_next(skb, NULL);
        ret = OL_TX_SEND(vdev, skb);
        if (ret) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                          "%s: Failed to tx", __func__);
                return ret;
        }

        return NULL;
}
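
/*
 * Caller-side sketch (comment only), assuming a single skb handed down
 * from the OS shim: ownership of the skb transfers to the driver only
 * when ol_tx_data() returns NULL. A non-NULL return is the same skb
 * back in the caller's hands; freeing it with qdf_nbuf_tx_free() (as
 * the flow-control paths below do) is one plausible disposition:
 *
 *      qdf_nbuf_t rejected = ol_tx_data(vdev, skb);
 *
 *      if (rejected)
 *              qdf_nbuf_tx_free(rejected, QDF_NBUF_PKT_ERROR);
 */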

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: NULL on success; the skb if the frame could not be sent
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
                                     qdf_nbuf_t skb)
{
        ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        qdf_nbuf_t ret;

        if (qdf_unlikely(!pdev)) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s: pdev is NULL", __func__);
                return skb;
        }

        if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
                && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
                && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
                qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

        /* Terminate the (single-element) list of tx frames */
        qdf_nbuf_set_next(skb, NULL);
        ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
        if (ret) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                           "%s: Failed to tx", __func__);
                return ret;
        }

        return NULL;
}
#endif

#if defined(FEATURE_TSO)
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        qdf_nbuf_t msdu = msdu_list;
        struct ol_txrx_msdu_info_t msdu_info;

        msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
        msdu_info.htt.action.tx_comp_req = 0;
        /*
         * The msdu_list variable could be used instead of the msdu var,
         * but just to clarify which operations are done on a single MSDU
         * vs. a list of MSDUs, use a distinct variable for single MSDUs
         * within the list.
         */
        while (msdu) {
                qdf_nbuf_t next;
                struct ol_tx_desc_t *tx_desc;
                int segments = 1;

                msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
                msdu_info.peer = NULL;

                if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
                        qdf_print("ol_tx_prepare_tso failed\n");
                        TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
                                tx.dropped.host_reject, msdu);
                        return msdu;
                }

                segments = msdu_info.tso_info.num_segs;
                TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
                TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
                        qdf_nbuf_tcp_tso_size(msdu));
                TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
                        qdf_nbuf_len(msdu));
                TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
                        qdf_nbuf_get_nr_frags(msdu));

                /*
                 * The netbuf may get linked into a different list inside the
                 * ol_tx_send function, so store the next pointer before the
                 * tx_send call.
                 */
                next = qdf_nbuf_next(msdu);
                /* init the current segment to the 1st segment in the list */
                while (segments) {

                        if (msdu_info.tso_info.curr_seg)
                                QDF_NBUF_CB_PADDR(msdu) =
                                        msdu_info.tso_info.curr_seg->
                                        seg.tso_frags[0].paddr;

                        segments--;

                        /*
                         * If this is a jumbo nbuf, then increment the number
                         * of nbuf users for each additional segment of the
                         * msdu. This will ensure that the skb is freed only
                         * after receiving tx completion for all segments of
                         * an nbuf.
                         */
                        if (segments)
                                qdf_nbuf_inc_users(msdu);

                        ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

                        TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);

                        /*
                         * If debug display is enabled, show the meta-data
                         * being downloaded to the target via the HTT tx
                         * descriptor.
                         */
                        htt_tx_desc_display(tx_desc->htt_tx_desc);

                        ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

                        if (msdu_info.tso_info.curr_seg) {
                                msdu_info.tso_info.curr_seg =
                                        msdu_info.tso_info.curr_seg->next;
                        }

                        qdf_nbuf_reset_num_frags(msdu);

                        if (msdu_info.tso_info.is_tso) {
                                TXRX_STATS_TSO_INC_SEG(vdev->pdev);
                                TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
                        }
                } /* while segments */

                msdu = next;
                if (msdu_info.tso_info.is_tso) {
                        TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
                        TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
                }
        } /* while msdus */
        return NULL;    /* all MSDUs were accepted */
}
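
/*
 * Reference-count arithmetic sketch (comment only): for a TSO nbuf that
 * ol_tx_prepare_tso() split into num_segs = 3 segments, the loop above
 * calls qdf_nbuf_inc_users(msdu) twice, skipping the increment for the
 * final segment. The nbuf therefore carries three users, the original
 * reference plus one per additional segment, and is actually freed only
 * when the tx completion for the last outstanding segment drops the
 * final reference.
 */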
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        qdf_nbuf_t msdu = msdu_list;
        struct ol_txrx_msdu_info_t msdu_info;

        msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
        msdu_info.htt.action.tx_comp_req = 0;
        msdu_info.tso_info.is_tso = 0;
        /*
         * The msdu_list variable could be used instead of the msdu var,
         * but just to clarify which operations are done on a single MSDU
         * vs. a list of MSDUs, use a distinct variable for single MSDUs
         * within the list.
         */
        while (msdu) {
                qdf_nbuf_t next;
                struct ol_tx_desc_t *tx_desc;

                msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
                msdu_info.peer = NULL;
                ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

                TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);

                /*
                 * If debug display is enabled, show the meta-data being
                 * downloaded to the target via the HTT tx descriptor.
                 */
                htt_tx_desc_display(tx_desc->htt_tx_desc);
                /*
                 * The netbuf may get linked into a different list inside the
                 * ol_tx_send function, so store the next pointer before the
                 * tx_send call.
                 */
                next = qdf_nbuf_next(msdu);
                ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
                msdu = next;
        }
        return NULL;    /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - allocate and prepare a Tx descriptor
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: handle to msdu_info
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment
 * descriptor information.
 *
 * Return: pointer to the Tx descriptor on success, NULL on failure
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
                      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
                      uint32_t pkt_download_len, uint32_t ep_id,
                      struct ol_txrx_msdu_info_t *msdu_info)
{
        struct ol_tx_desc_t *tx_desc = NULL;
        uint32_t *htt_tx_desc;
        void *htc_hdr_vaddr;
        u_int32_t num_frags, i;
        enum extension_header_type type;

        tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
        if (qdf_unlikely(!tx_desc))
                return NULL;

        tx_desc->netbuf = msdu;
        if (msdu_info->tso_info.is_tso) {
                tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
                tx_desc->pkt_type = OL_TX_FRM_TSO;
                TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
        } else {
                tx_desc->pkt_type = OL_TX_FRM_STD;
        }

        htt_tx_desc = tx_desc->htt_tx_desc;

        /* Make sure frags num is set to 0 */
        /*
         * Do this here rather than in hardstart, so
         * that we can hopefully take only one cache-miss while
         * accessing skb->cb.
         */

        /* HTT Header */
        /* TODO : Take care of multiple fragments */

        type = ol_tx_get_ext_header_type(vdev, msdu);

        /* TODO: Precompute and store paddr in ol_tx_desc_t */
        /* Virtual address of the HTT/HTC header, added by driver */
        htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
        htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
                         tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
                         &msdu_info->htt, &msdu_info->tso_info,
                         NULL, type);

        num_frags = qdf_nbuf_get_num_frags(msdu);
        /* num_frags are expected to be 2 max */
        num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
                ? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
                : num_frags;
#if defined(HELIUMPLUS_PADDR64)
        /*
         * Use num_frags - 1, since 1 frag is used to store
         * the HTT/HTC descriptor
         * Refer to htt_tx_desc_init()
         */
        htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
                              num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
        htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
                              num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
        if (msdu_info->tso_info.is_tso) {
                htt_tx_desc_fill_tso_info(pdev->htt_pdev,
                        tx_desc->htt_frag_desc, &msdu_info->tso_info);
                TXRX_STATS_TSO_SEG_UPDATE(pdev,
                        msdu_info->tso_info.curr_seg->seg);
        } else {
                for (i = 1; i < num_frags; i++) {
                        qdf_size_t frag_len;
                        qdf_dma_addr_t frag_paddr;

                        frag_len = qdf_nbuf_get_frag_len(msdu, i);
                        frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
                        if (type != EXT_HEADER_NOT_PRESENT) {
                                frag_paddr +=
                                        sizeof(struct htt_tx_msdu_desc_ext_t);
                                frag_len -=
                                        sizeof(struct htt_tx_msdu_desc_ext_t);
                        }
#if defined(HELIUMPLUS_PADDR64)
                        htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
                                         i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
                        qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
                                  __func__, __LINE__, tx_desc->htt_frag_desc,
                                  i - 1, frag_paddr, frag_len);
                        dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
                        htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
                                         i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
                }
        }

        /*
         * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
         * this is not required. We still have to mark the swap bit correctly,
         * when posting to the ring
         */
        /* Check to make sure, data download length is correct */

        /*
         * TODO : Can we remove this check and always download a fixed length ?
         */

        if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
                pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

        if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
                pkt_download_len = qdf_nbuf_len(msdu);

        /* Fill the HTC header information */
        /*
         * Passing 0 as the seq_no field, we can probably get away
         * with it for the time being, since this is not checked in f/w
         */
        /* TODO : Prefill this, look at multi-fragment case */
        HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

        return tx_desc;
}
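
/*
 * Pairing sketch (comment only): a descriptor produced by
 * ol_tx_prepare_ll_fast() is handed straight to the copy engine, and
 * the caller must free it if the CE refuses the frame. This mirrors the
 * ol_tx_ll_fast() implementations below:
 *
 *      tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
 *                                      pkt_download_len, ep_id,
 *                                      &msdu_info);
 *      if (tx_desc &&
 *          ce_send_fast(pdev->ce_tx_hdl, msdu, ep_id,
 *                       pkt_download_len) == 0) {
 *              ol_tx_desc_free(pdev, tx_desc); // CE rejected the frame
 *              return msdu;                    // hand it back to the caller
 *      }
 */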
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out
 *
 * Return: NULL on success; pointer to the remaining nbuf list when a
 *      frame fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        qdf_nbuf_t msdu = msdu_list;
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        uint32_t pkt_download_len =
                ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
        uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
        struct ol_txrx_msdu_info_t msdu_info;

        msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
        msdu_info.htt.action.tx_comp_req = 0;
        /*
         * The msdu_list variable could be used instead of the msdu var,
         * but just to clarify which operations are done on a single MSDU
         * vs. a list of MSDUs, use a distinct variable for single MSDUs
         * within the list.
         */
        while (msdu) {
                qdf_nbuf_t next;
                struct ol_tx_desc_t *tx_desc;
                int segments = 1;

                msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
                msdu_info.peer = NULL;

                if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
                        qdf_print("ol_tx_prepare_tso failed\n");
                        TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
                                tx.dropped.host_reject, msdu);
                        return msdu;
                }

                segments = msdu_info.tso_info.num_segs;
                TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
                TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
                        qdf_nbuf_tcp_tso_size(msdu));
                TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
                        qdf_nbuf_len(msdu));
                TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
                        qdf_nbuf_get_nr_frags(msdu));

                /*
                 * The netbuf may get linked into a different list
                 * inside the ce_send_fast function, so store the next
                 * pointer before the ce_send call.
                 */
                next = qdf_nbuf_next(msdu);
                /* init the current segment to the 1st segment in the list */
                while (segments) {

                        if (msdu_info.tso_info.curr_seg)
                                QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
                                        curr_seg->seg.tso_frags[0].paddr;

                        segments--;

                        /*
                         * If this is a jumbo nbuf, then increment the number
                         * of nbuf users for each additional segment of the
                         * msdu. This will ensure that the skb is freed only
                         * after receiving tx completion for all segments of
                         * an nbuf.
                         */
                        if (segments)
                                qdf_nbuf_inc_users(msdu);

                        msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
                        msdu_info.htt.info.vdev_id = vdev->vdev_id;
                        msdu_info.htt.action.cksum_offload =
                                qdf_nbuf_get_tx_cksum(msdu);
                        switch (qdf_nbuf_get_exemption_type(msdu)) {
                        case QDF_NBUF_EXEMPT_NO_EXEMPTION:
                        case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
                                /* We want to encrypt this frame */
                                msdu_info.htt.action.do_encrypt = 1;
                                break;
                        case QDF_NBUF_EXEMPT_ALWAYS:
                                /* We don't want to encrypt this frame */
                                msdu_info.htt.action.do_encrypt = 0;
                                break;
                        default:
                                msdu_info.htt.action.do_encrypt = 1;
                                qdf_assert(0);
                                break;
                        }

                        tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
                                                        pkt_download_len,
                                                        ep_id, &msdu_info);

                        TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

                        if (qdf_likely(tx_desc)) {
                                DPTRACE(qdf_dp_trace_ptr(msdu,
                                    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
                                    qdf_nbuf_data_addr(msdu),
                                    sizeof(qdf_nbuf_data(msdu)),
                                    tx_desc->id, vdev->vdev_id));

                                if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER
                                                        (msdu))
                                        pkt_download_len +=
                                          sizeof(struct htt_tx_msdu_desc_ext_t);

                                /*
                                 * If debug display is enabled, show the meta
                                 * data being downloaded to the target via the
                                 * HTT tx descriptor.
                                 */
                                htt_tx_desc_display(tx_desc->htt_tx_desc);
                                if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
                                                ep_id, pkt_download_len))) {
                                        /*
                                         * The packet could not be sent.
                                         * Free the descriptor, return the
                                         * packet to the caller.
                                         */
                                        ol_tx_desc_free(pdev, tx_desc);
                                        return msdu;
                                }
                                if (msdu_info.tso_info.curr_seg) {
                                        msdu_info.tso_info.curr_seg =
                                        msdu_info.tso_info.curr_seg->next;
                                }

                                if (msdu_info.tso_info.is_tso) {
                                        qdf_nbuf_reset_num_frags(msdu);
                                        TXRX_STATS_TSO_INC_SEG(vdev->pdev);
                                        TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
                                }
                        } else {
                                TXRX_STATS_MSDU_LIST_INCR(
                                        pdev, tx.dropped.host_reject, msdu);
                                /* the list of unaccepted MSDUs */
                                return msdu;
                        }
                } /* while segments */

                msdu = next;
                if (msdu_info.tso_info.is_tso) {
                        TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
                        TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
                }
        } /* while msdus */
        return NULL;    /* all MSDUs were accepted */
}
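
/*
 * Exemption mapping sketch (comment only): the switch above folds the
 * exemption attribute carried in the nbuf cb into the single do_encrypt
 * bit of the HTT descriptor:
 *
 *      QDF_NBUF_EXEMPT_NO_EXEMPTION                   -> do_encrypt = 1
 *      QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE -> do_encrypt = 1
 *      QDF_NBUF_EXEMPT_ALWAYS (e.g. key-exchange frames that must go
 *      out unencrypted)                               -> do_encrypt = 0
 *
 * Any other value asserts and conservatively encrypts.
 */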
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        qdf_nbuf_t msdu = msdu_list;
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        uint32_t pkt_download_len =
                ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
        uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
        struct ol_txrx_msdu_info_t msdu_info;

        msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
        msdu_info.htt.action.tx_comp_req = 0;
        msdu_info.tso_info.is_tso = 0;
        /*
         * The msdu_list variable could be used instead of the msdu var,
         * but just to clarify which operations are done on a single MSDU
         * vs. a list of MSDUs, use a distinct variable for single MSDUs
         * within the list.
         */
        while (msdu) {
                qdf_nbuf_t next;
                struct ol_tx_desc_t *tx_desc;

                msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
                msdu_info.peer = NULL;

                msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
                msdu_info.htt.info.vdev_id = vdev->vdev_id;
                msdu_info.htt.action.cksum_offload =
                        qdf_nbuf_get_tx_cksum(msdu);
                switch (qdf_nbuf_get_exemption_type(msdu)) {
                case QDF_NBUF_EXEMPT_NO_EXEMPTION:
                case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
                        /* We want to encrypt this frame */
                        msdu_info.htt.action.do_encrypt = 1;
                        break;
                case QDF_NBUF_EXEMPT_ALWAYS:
                        /* We don't want to encrypt this frame */
                        msdu_info.htt.action.do_encrypt = 0;
                        break;
                default:
                        msdu_info.htt.action.do_encrypt = 1;
                        qdf_assert(0);
                        break;
                }

                tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
                                                pkt_download_len, ep_id,
                                                &msdu_info);

                TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

                if (qdf_likely(tx_desc)) {
                        DPTRACE(qdf_dp_trace_ptr(msdu,
                                QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
                                qdf_nbuf_data_addr(msdu),
                                sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
                                vdev->vdev_id));

                        if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
                                pkt_download_len +=
                                        sizeof(struct htt_tx_msdu_desc_ext_t);

                        /*
                         * If debug display is enabled, show the meta-data being
                         * downloaded to the target via the HTT tx descriptor.
                         */
                        htt_tx_desc_display(tx_desc->htt_tx_desc);
                        /*
                         * The netbuf may get linked into a different list
                         * inside the ce_send_fast function, so store the next
                         * pointer before the ce_send call.
                         */
                        next = qdf_nbuf_next(msdu);
                        if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
                                               ep_id, pkt_download_len))) {
                                /*
                                 * The packet could not be sent.
                                 * Free the descriptor, return the packet
                                 * to the caller.
                                 */
                                ol_tx_desc_free(pdev, tx_desc);
                                return msdu;
                        }
                        msdu = next;
                } else {
                        TXRX_STATS_MSDU_LIST_INCR(
                                pdev, tx.dropped.host_reject, msdu);
                        return msdu;    /* the list of unaccepted MSDUs */
                }
        }

        return NULL;    /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 * @vdev: handle to the virtual device
 * @msdu_list: msdu list to be sent out
 *
 * Dispatches to ol_tx_ll_fast() when the HIF layer reports that
 * fastpath mode is enabled, and to ol_tx_ll() otherwise.
 *
 * Return: NULL on success; the list of unaccepted MSDUs otherwise
 */
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        struct hif_opaque_softc *hif_device =
                (struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

        if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
                msdu_list = ol_tx_ll_fast(vdev, msdu_list);
        else
                msdu_list = ol_tx_ll(vdev, msdu_list);

        return msdu_list;
}
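
/*
 * Dispatch sketch (comment only): callers treat ol_tx_ll_wrapper()
 * exactly like ol_tx_ll(): hand in a NULL-terminated nbuf list, get
 * back whatever was not accepted. The fastpath/slowpath choice is
 * invisible to the caller:
 *
 *      qdf_nbuf_set_next(skb, NULL);
 *      skb = ol_tx_ll_wrapper(vdev, skb);
 *      if (skb)
 *              ;       // requeue or drop the rejected frame
 */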
#else
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
        int max_to_accept;

        qdf_spin_lock_bh(&vdev->ll_pause.mutex);
        if (vdev->ll_pause.paused_reason) {
                qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
                return;
        }

        /*
         * Send as much of the backlog as possible, but leave some margin
         * of unallocated tx descriptors that can be used for new frames
         * being transmitted by other vdevs.
         * Ideally there would be a scheduler, which would not only leave
         * some margin for new frames for other vdevs, but also would
         * fairly apportion the tx descriptors between multiple vdevs that
         * have backlogs in their pause queues.
         * However, the fairness benefit of having a scheduler for frames
         * from multiple vdev's pause queues is not sufficient to outweigh
         * the extra complexity.
         */
        max_to_accept = vdev->pdev->tx_desc.num_free -
                OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
        while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
                qdf_nbuf_t tx_msdu;
                max_to_accept--;
                vdev->ll_pause.txq.depth--;
                tx_msdu = vdev->ll_pause.txq.head;
                if (tx_msdu) {
                        vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
                        if (NULL == vdev->ll_pause.txq.head)
                                vdev->ll_pause.txq.tail = NULL;
                        qdf_nbuf_set_next(tx_msdu, NULL);
                        QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
                                        QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
                        tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
                        /*
                         * It is unexpected that ol_tx_ll would reject the frame
                         * since we checked that there's room for it, though
                         * there's an infinitesimal possibility that between the
                         * time we checked the room available and now, a
                         * concurrent batch of tx frames used up all the room.
                         * For simplicity, just drop the frame.
                         */
                        if (tx_msdu) {
                                qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
                                               QDF_DMA_TO_DEVICE);
                                qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
                        }
                }
        }
        if (vdev->ll_pause.txq.depth) {
                qdf_timer_stop(&vdev->ll_pause.timer);
                qdf_timer_start(&vdev->ll_pause.timer,
                                OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
                vdev->ll_pause.is_q_timer_on = true;
                if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
                        vdev->ll_pause.q_overflow_cnt++;
        }

        qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
                              qdf_nbuf_t msdu_list, uint8_t start_timer)
{
        qdf_spin_lock_bh(&vdev->ll_pause.mutex);
        while (msdu_list &&
               vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
                qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
                QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
                                             QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
                DPTRACE(qdf_dp_trace(msdu_list,
                        QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
                        qdf_nbuf_data_addr(msdu_list),
                        sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));

                vdev->ll_pause.txq.depth++;
                if (!vdev->ll_pause.txq.head) {
                        vdev->ll_pause.txq.head = msdu_list;
                        vdev->ll_pause.txq.tail = msdu_list;
                } else {
                        qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
                }
                vdev->ll_pause.txq.tail = msdu_list;

                msdu_list = next;
        }
        if (vdev->ll_pause.txq.tail)
                qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

        if (start_timer) {
                qdf_timer_stop(&vdev->ll_pause.timer);
                qdf_timer_start(&vdev->ll_pause.timer,
                                OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
                vdev->ll_pause.is_q_timer_on = true;
        }
        qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

        return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
        uint16_t eth_type;
        uint32_t paused_reason;

        if (msdu_list == NULL)
                return NULL;

        paused_reason = vdev->ll_pause.paused_reason;
        if (paused_reason) {
                if (qdf_unlikely((paused_reason &
                                  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
                                 paused_reason)) {
                        eth_type = (((struct ethernet_hdr_t *)
                                     qdf_nbuf_data(msdu_list))->
                                    ethertype[0] << 8) |
                                   (((struct ethernet_hdr_t *)
                                     qdf_nbuf_data(msdu_list))->ethertype[1]);
                        if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
                                msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
                                return msdu_list;
                        }
                }
                msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
        } else {
                if (vdev->ll_pause.txq.depth > 0 ||
                    vdev->pdev->tx_throttle.current_throttle_level !=
                    THROTTLE_LEVEL_0) {
                        /*
                         * Not paused, but there is a backlog of frames
                         * from a prior pause or throttle-off phase.
                         */
                        msdu_list = ol_tx_vdev_pause_queue_append(
                                vdev, msdu_list, 0);
                        /*
                         * If throttle is disabled or the phase is "on",
                         * send the frame.
                         */
                        if (vdev->pdev->tx_throttle.current_throttle_level ==
                            THROTTLE_LEVEL_0 ||
                            vdev->pdev->tx_throttle.current_throttle_phase ==
                            THROTTLE_PHASE_ON) {
                                /*
                                 * Send as many frames as possible
                                 * from the vdev's backlog.
                                 */
                                ol_tx_vdev_ll_pause_queue_send_base(vdev);
                        }
                } else {
                        /*
                         * Not paused, no throttle and no backlog -
                         * send the new frames.
                         */
                        msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
                }
        }
        return msdu_list;
}
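
/*
 * Decision sketch (comment only) for ol_tx_ll_queue():
 *
 *      paused, only for PEER_UNAUTHORIZED, frame is EAPOL/WAPI
 *              -> send immediately (key-exchange frames must get through)
 *      paused, any other reason
 *              -> append to the pause queue and arm the 5 ms timer
 *      not paused, but backlog or throttling active
 *              -> append (to preserve ordering), then drain the backlog
 *                 if the throttle phase permits
 *      not paused, idle
 *              -> send directly via ol_tx_ll_wrapper()
 *
 * The return value is whatever could not be queued or sent.
 */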

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
        int max_to_send;        /* remaining budget of frames to send */
        qdf_nbuf_t tx_msdu;
        struct ol_txrx_vdev_t *vdev = NULL;
        uint8_t more;

        if (NULL == pdev)
                return;

        if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
                return;

        /* ensure that we send no more than tx_threshold frames at once */
        max_to_send = pdev->tx_throttle.tx_threshold;

        /* round robin through the vdev queues for the given pdev */

        /*
         * Potential improvement: download several frames from the same vdev
         * at a time, since it is more likely that those frames could be
         * aggregated together. Remember which vdev was serviced last, so
         * the next call to this function can resume the round-robin
         * traversal where the current invocation left off.
         */
        do {
                more = 0;
                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

                        qdf_spin_lock_bh(&vdev->ll_pause.mutex);
                        if (vdev->ll_pause.txq.depth) {
                                if (vdev->ll_pause.paused_reason) {
                                        qdf_spin_unlock_bh(&vdev->ll_pause.
                                                           mutex);
                                        continue;
                                }

                                tx_msdu = vdev->ll_pause.txq.head;
                                if (NULL == tx_msdu) {
                                        qdf_spin_unlock_bh(&vdev->ll_pause.
                                                           mutex);
                                        continue;
                                }

                                max_to_send--;
                                vdev->ll_pause.txq.depth--;

                                vdev->ll_pause.txq.head =
                                        qdf_nbuf_next(tx_msdu);

                                if (NULL == vdev->ll_pause.txq.head)
                                        vdev->ll_pause.txq.tail = NULL;

                                qdf_nbuf_set_next(tx_msdu, NULL);
                                tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
                                /*
                                 * It is unexpected that ol_tx_ll would reject
                                 * the frame, since we checked that there's
                                 * room for it, though there's an infinitesimal
                                 * possibility that between the time we checked
                                 * the room available and now, a concurrent
                                 * batch of tx frames used up all the room.
                                 * For simplicity, just drop the frame.
                                 */
                                if (tx_msdu) {
                                        qdf_nbuf_unmap(pdev->osdev, tx_msdu,
                                                       QDF_DMA_TO_DEVICE);
                                        qdf_nbuf_tx_free(tx_msdu,
                                                         QDF_NBUF_PKT_ERROR);
                                }
                        }
                        /* check if there are more msdus to transmit */
                        if (vdev->ll_pause.txq.depth)
                                more = 1;
                        qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
                }
        } while (more && max_to_send);

        vdev = NULL;
        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                qdf_spin_lock_bh(&vdev->ll_pause.mutex);
                if (vdev->ll_pause.txq.depth) {
                        qdf_timer_stop(&pdev->tx_throttle.tx_timer);
                        qdf_timer_start(
                                &pdev->tx_throttle.tx_timer,
                                OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
                        qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
                        return;
                }
                qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
        }
}
| 1035 | |
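/*
 * Illustrative sketch (not part of the driver) of the round-robin resume
 * idea mentioned in ol_tx_pdev_ll_pause_queue_send_all() above: a
 * hypothetical pdev->last_serviced_vdev field could record where the
 * previous invocation stopped, so the next one starts at the following
 * vdev instead of always restarting at the list head:
 *
 *	vdev = pdev->last_serviced_vdev ?
 *		TAILQ_NEXT(pdev->last_serviced_vdev, vdev_list_elem) : NULL;
 *	if (!vdev)
 *		vdev = TAILQ_FIRST(&pdev->vdev_list);
 *	... service vdev, then store it back into pdev->last_serviced_vdev ...
 *
 * The field name and the exact traversal are assumptions for illustration
 * only.
 */
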
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return tx_spec &
		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & OL_TX_SPEC_NO_AGGR)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}
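/*
 * Example (for illustration only): a caller passing
 * tx_spec = OL_TX_SPEC_RAW | OL_TX_SPEC_NO_ENCRYPT gets back
 *
 *	sub_type = 0x1                                  (header present)
 *		 | 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S
 *
 * i.e. the "no aggregation" bit stays clear, while the "no encrypt" bit
 * is set alongside the always-set "802.11 MAC header present" bit.
 */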

qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info)	\
	do {								\
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);	\
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);	\
			if (tx_msdu_info.peer) {			\
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			}						\
			goto MSDU_LOOP_BOTTOM;				\
		}							\
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 *			   and extract it if one is present
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: destination the TX control header is copied to, if present
 *
 * Return: true if OCB parsing is successful
 */
#define OCB_HEADER_VERSION 1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}
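/*
 * For reference, the buffer layout parse_ocb_tx_header() expects when a
 * control header is present (a sketch inferred from the code above, not
 * a definition from this file):
 *
 *	+----------------------+---------------------------+---------+
 *	| struct ether_header  | struct ocb_tx_ctrl_hdr_t  | payload |
 *	| (ether_type ==       | (version, length, ...)    |         |
 *	|  ETHERTYPE_OCB_TX)   |                           |         |
 *	+----------------------+---------------------------+---------+
 *
 * Both headers are stripped via qdf_nbuf_pull_head() before the frame
 * continues down the normal tx path.
 */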

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)

/**
 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
 *			   for a HL system.
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @msdu: the tx frame
 * @msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
		if ((qdf_nbuf_is_ipv4_dhcp_pkt(msdu) == true) ||
		    (qdf_nbuf_is_ipv4_eapol_pkt(msdu) == true)) {
			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
		}
	}
	return tx_desc;
}
#else

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
 *				  for a management frame
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @tx_mgmt_frm: the tx management frame
 * @tx_msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
static inline
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_msdu_info->htt.action.tx_comp_req = 1;
	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	return tx_desc;
}

/**
 * ol_txrx_mgmt_send_frame() - send a management frame
 * @vdev: virtual device sending the frame
 * @tx_desc: tx desc
 * @tx_mgmt_frm: management frame to send
 * @tx_msdu_info: the tx meta data
 * @chanfreq: channel frequency to transmit the frame on
 *
 * Return:
 *	0 -> the frame is accepted for transmission, -OR-
 *	1 -> the frame was not accepted
 */
static inline
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;

	/*
	 * 1. Look up the peer and queue the frame in the peer's mgmt queue.
	 * 2. Invoke the download scheduler.
	 */
	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
	if (!txq) {
		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
					     msdu); */
		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
					     1 /* error */);
		if (tx_msdu_info->peer) {
			/* remove the peer reference added above */
			ol_txrx_peer_unref_delete(tx_msdu_info->peer);
		}
		return 1; /* can't accept the tx mgmt frame */
	}
	/*
	 * Initialize the HTT tx desc l2 header offset field.
	 * Even though tx encap does not apply to mgmt frames,
	 * htt_tx_desc_mpdu_header still needs to be called,
	 * to specify that there was no L2 header added by tx encap,
	 * so the frame's length does not need to be adjusted to account for
	 * an added L2 header.
	 */
	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
	htt_tx_desc_init(
		pdev->htt_pdev, tx_desc->htt_tx_desc,
		tx_desc->htt_tx_desc_paddr,
		ol_tx_desc_id(pdev, tx_desc),
		tx_mgmt_frm,
		&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
	htt_tx_desc_display(tx_desc->htt_tx_desc);
	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);

	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
	if (tx_msdu_info->peer) {
		/* remove the peer reference added above */
		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
	}
	ol_tx_sched(vdev->pdev);

	return 0;
}

#else

static inline
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* For LL tx_comp_req is not used so initialized to 0 */
	tx_msdu_info->htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames.  Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS_PADDR64)
	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
			  tx_desc); */
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
			pdev->htt_pdev,
			tx_desc->htt_tx_desc,
			qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
			0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
		dump_frag_desc("after htt_tx_desc_frags_table_set",
			       tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) */
	}

	return tx_desc;
}

static inline
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
		QDF_NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0;
}
#endif

/**
 * ol_tx_hl_base() - send tx frames for a HL system.
 * @vdev: the virtual device sending the data
 * @tx_spec: indicate what non-standard transmission actions to apply
 * @msdu_list: the tx frames to send
 * @tx_comp_req: tx completion req
 *
 * Return: NULL if all MSDUs are accepted
 */
static inline qdf_nbuf_t
ol_tx_hl_base(
	ol_txrx_vdev_handle vdev,
	enum ol_tx_spec tx_spec,
	qdf_nbuf_t msdu_list,
	int tx_comp_req)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t tx_msdu_info;
	struct ocb_tx_ctrl_hdr_t tx_ctrl;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	tx_msdu_info.peer = NULL;
	tx_msdu_info.tso_info.is_tso = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_frms_queue_t *txq;
		struct ol_tx_desc_t *tx_desc = NULL;

		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));

		/*
		 * The netbuf will get stored into a (peer-TID) tx queue list
		 * inside the ol_tx_classify_store function or else dropped,
		 * so store the next pointer immediately.
		 */
		next = qdf_nbuf_next(msdu);

		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);

		if (!tx_desc) {
			/*
			 * If we're out of tx descs, there's no need to try
			 * to allocate tx descs for the remaining MSDUs.
			 */
			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
						  msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}

		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu); */

		if (tx_spec != OL_TX_SPEC_STD) {
#if defined(FEATURE_WLAN_TDLS)
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
#else
			if (tx_spec & OL_TX_SPEC_TSO) {
#endif
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			}
			if (ol_txrx_tx_is_raw(tx_spec)) {
				/* CHECK THIS: does this need
				 * to happen after htt_tx_desc_init?
				 */
				/* different types of raw frames */
				u_int8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_raw,
						 sub_type);
			}
		}

		tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
		tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
		tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
		tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;

		/*
		 * If the vdev is in OCB mode,
		 * parse the tx control header.
		 */
		if (vdev->opmode == wlan_op_mode_ocb) {
			if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
				/*
				 * There was an error parsing
				 * the header.  Skip this packet.
				 */
				goto MSDU_LOOP_BOTTOM;
			}
		}

		txq = ol_tx_classify(vdev, tx_desc, msdu, &tx_msdu_info);

		if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
			/*
			 * Drop this frame,
			 * but try sending subsequent frames.
			 */
			/* TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.no_txq,
						     msdu); */
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
			if (tx_msdu_info.peer) {
				/* remove the peer reference added above */
				ol_txrx_peer_unref_delete(tx_msdu_info.peer);
			}
			goto MSDU_LOOP_BOTTOM;
		}

		if (tx_msdu_info.peer) {
			/*
			 * If the state is not associated then drop all
			 * the data packets received for that peer.
			 */
			if (tx_msdu_info.peer->state ==
			    OL_TXRX_PEER_STATE_DISC) {
				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
				ol_tx_desc_frame_free_nonstd(pdev,
							     tx_desc, 1);
				ol_txrx_peer_unref_delete(tx_msdu_info.peer);
				msdu = next;
				continue;
			} else if (tx_msdu_info.peer->state !=
				   OL_TXRX_PEER_STATE_AUTH) {
				/*
				 * For a peer that is not yet authorized,
				 * only EAPOL (802.1X) and WAI frames may
				 * be sent.
				 */
				if (tx_msdu_info.htt.info.ethertype !=
				    ETHERTYPE_PAE &&
				    tx_msdu_info.htt.info.ethertype !=
				    ETHERTYPE_WAI) {
					qdf_atomic_inc(
						&pdev->tx_queue.rsrc_cnt);
					ol_tx_desc_frame_free_nonstd(
						pdev, tx_desc, 1);
					ol_txrx_peer_unref_delete(
						tx_msdu_info.peer);
					msdu = next;
					continue;
				}
			}
		}
		/*
		 * Initialize the HTT tx desc l2 header offset field.
		 * htt_tx_desc_mpdu_header needs to be called to
		 * make sure the l2 header size is initialized
		 * correctly to handle cases where TX ENCAP is disabled
		 * or Tx Encap fails to perform Encap.
		 */
		htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);

		/*
		 * Note: when the driver is built without support for
		 * SW tx encap, the following macro is a no-op.
		 * When the driver is built with support for SW tx
		 * encap, it performs encap, and if an error is
		 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
		 */
		OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
				    tx_msdu_info);

		/* initialize the HW tx descriptor */
		htt_tx_desc_init(
			pdev->htt_pdev, tx_desc->htt_tx_desc,
			tx_desc->htt_tx_desc_paddr,
			ol_tx_desc_id(pdev, tx_desc),
			msdu,
			&tx_msdu_info.htt,
			&tx_msdu_info.tso_info,
			&tx_ctrl,
			vdev->opmode == wlan_op_mode_ocb);
		/*
		 * If debug display is enabled, show the meta-data
		 * being downloaded to the target via the
		 * HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);

		ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
		if (tx_msdu_info.peer) {
			OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer, msdu);
			/* remove the peer reference added above */
			ol_txrx_peer_unref_delete(tx_msdu_info.peer);
		}
MSDU_LOOP_BOTTOM:
		msdu = next;
	}
	ol_tx_sched(pdev);
	return NULL; /* all MSDUs were accepted */
}

qdf_nbuf_t
ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	int tx_comp_req = pdev->cfg.default_tx_comp_req;

	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
}

qdf_nbuf_t
ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	int tx_comp_req = pdev->cfg.default_tx_comp_req;

	if (!tx_comp_req) {
		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
		    (pdev->tx_data_callback.func))
			tx_comp_req = 1;
	}
	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
}

/**
 * ol_tx_non_std() - Allow the control-path SW to send data frames
 * @vdev: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Generally, all tx data frames come from the OS shim into the txrx layer.
 * However, there are rare cases such as TDLS messaging where the UMAC
 * control-path SW creates tx data frames.
 * This UMAC SW can call this function to provide the tx data frames to
 * the txrx layer.
 * The UMAC SW can request a callback for these data frames after their
 * transmission completes, by using the ol_txrx_data_tx_cb_set function
 * to register a tx completion callback, and by specifying
 * OL_TX_SPEC_NO_FREE as the tx_spec arg when giving the frames to
 * ol_tx_non_std.
 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
 * as specified by ol_cfg_frame_type().
 *
 * Return: NULL - success, skb - failure
 */
qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	if (vdev->pdev->cfg.is_high_latency)
		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
	else
		return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}
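/*
 * Illustrative usage sketch (not part of this file): a hypothetical TDLS
 * component could register a completion callback and then hand a
 * self-built data frame to the txrx layer while keeping ownership of the
 * netbuf:
 *
 *	static void my_tdls_tx_done(void *ctxt, qdf_nbuf_t msdu_list,
 *				    int had_error)
 *	{
 *		... reclaim or retry the NULL-terminated msdu_list ...
 *	}
 *
 *	ol_txrx_data_tx_cb_set(vdev, my_tdls_tx_done, my_ctxt);
 *	ol_tx_non_std(vdev, OL_TX_SPEC_NO_FREE, tdls_frame);
 *
 * my_tdls_tx_done, my_ctxt, and tdls_frame are invented names, and the
 * callback signature shown is an assumption based on how the callback is
 * registered here, not a definition from this file.
 */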

/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 *			      notifications for management frames.
 * @pdev: the data physical device object
 * @type: the type of mgmt frame the callback is used for
 * @download_cb: the callback for notification of delivery to the target
 * @ota_ack_cb: the callback for notification of delivery to the peer
 * @ctxt: context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame.  If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}
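/*
 * Illustrative usage sketch (not part of this file), assuming a
 * hypothetical frame-type index MY_MGMT_TYPE below OL_TXRX_MGMT_NUM_TYPES
 * and invented callback names:
 *
 *	ol_txrx_mgmt_tx_cb_set(pdev, MY_MGMT_TYPE,
 *			       my_download_done,   (reached the target)
 *			       my_ota_ack,         (acked by the peer)
 *			       my_ctxt);
 *
 * Afterwards, any frame submitted via ol_txrx_mgmt_send_ext() with
 * type == MY_MGMT_TYPE triggers these callbacks as the corresponding
 * delivery notifications arrive from the target.
 */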

#if defined(HELIUMPLUS_PADDR64)
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: %pad",
		  tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=%pad)",
		  __func__, __LINE__, tx_desc->htt_frag_desc,
		  &tx_desc->htt_frag_desc_paddr);

	/*
	 * From htt_tx_desc_frag() it appears that tx_desc->htt_frag_desc
	 * is already dereferenceable (=> in the virtual address space).
	 */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
}
#endif /* HELIUMPLUS_PADDR64 */

/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame to transmit
 * @type: the type of management frame (determines which callback to use)
 * @use_6mbps: specify whether the management frame to transmit should
 *	use 6 Mbps rather than the 1 Mbps min rate (for 5GHz band or P2P)
 * @chanfreq: channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *         1 - the frame was not accepted
 */
int
ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
		      qdf_nbuf_t tx_mgmt_frm,
		      uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t tx_msdu_info;
	int result = 0;

	tx_msdu_info.tso_info.is_tso = 0;

	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
	tx_msdu_info.htt.action.do_tx_complete =
		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;

	/*
	 * FIX THIS: l2_hdr_type should only specify the L2 header type.
	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
	 * that is a combination of L2 header type and 802.11 frame type.
	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
	 * But if the 802.11 frame type is "data", then the HTT pkt type is
	 * the L2 header type (more or less): 802.3 vs. Native WiFi
	 * (basic 802.11).
	 * (Or the header type can be "raw", which is any version of the 802.11
	 * header, and also implies that some of the offloaded tx data
	 * processing steps may not apply.)
	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
	 * l2_hdr_type field to program the HTT pkt type.  Thus, this txrx SW
	 * needs to overload the l2_hdr_type to indicate whether the frame is
	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
	 * just the L2 header type.  For mgmt frames, there should be a
	 * separate function to patch the HTT pkt type to store a "mgmt" value
	 * rather than the L2 header type.  Then the HTT pkt type can be
	 * programmed efficiently for data frames, and the msdu_info's
	 * l2_hdr_type field won't be confusingly overloaded to hold the
	 * 802.11 frame type rather than the L2 header type.
	 */
	/*
	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
	 * htt_pkt_type_mgmt
	 * The htt module expects a "enum htt_pkt_type" value.
	 * The htt_dxe module expects a "enum htt_frm_type" value.
	 * This needs to be cleaned up, so both versions of htt use a
	 * consistent method of specifying the frame type.
	 */
#ifdef QCA_SUPPORT_INTEGRATED_SOC
	/* tx mgmt frames always come with a 802.11 header */
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
#else
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
#endif

	tx_msdu_info.peer = NULL;

	tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
					     &tx_msdu_info);
	if (!tx_desc)
		return -EINVAL; /* can't accept the tx mgmt frame */

	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;

	result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
					 &tx_msdu_info, chanfreq);

	/* 0 if the frame was accepted for transmission, 1 if rejected */
	return result;
}
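/*
 * Illustrative call (not part of this file): send a management frame on
 * 5 GHz channel 36 (5180 MHz) at the 6 Mbps rate, using a hypothetical
 * type index MY_MGMT_TYPE registered earlier via ol_txrx_mgmt_tx_cb_set():
 *
 *	int rc = ol_txrx_mgmt_send_ext(vdev, frm, MY_MGMT_TYPE, 1, 5180);
 *	if (rc)
 *		handle_reject(frm);
 *
 * MY_MGMT_TYPE and handle_reject are invented names for illustration.
 */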

void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
{
	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}

qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

	return NULL;
}

#if defined(FEATURE_TSO)
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	/*
	 * Guard against qdf_mem_malloc failure: stop linking as soon as an
	 * allocation returns NULL, leaving a shorter but well-terminated
	 * freelist instead of dereferencing a NULL element.
	 */
	for (i = 0; c_element && i < (num_seg - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
		if (c_element)
			c_element->next = NULL;
	}
	pdev->tso_seg_pool.pool_size = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}

void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		/* the freelist may be shorter than pool_size; stop at NULL */
		if (!c_element)
			break;
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
}
#endif /* FEATURE_TSO */
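/*
 * For illustration (not part of this file): consumers of the TSO segment
 * pool are expected to pop and push elements under tso_mutex, along these
 * lines:
 *
 *	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
 *	seg = pdev->tso_seg_pool.freelist;
 *	if (seg)
 *		pdev->tso_seg_pool.freelist = seg->next;
 *	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
 *
 * This is an assumed sketch of the allocation side; the actual alloc/free
 * helpers live elsewhere in the driver.
 */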