Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
hangtian | 127c953 | 2019-01-12 13:29:07 +0800 | [diff] [blame] | 2 | * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 19 | /** |
| 20 | * @file htt.c |
| 21 | * @brief Provide functions to create+init and destroy a HTT instance. |
| 22 | * @details |
| 23 | * This file contains functions for creating a HTT instance; initializing |
| 24 | * the HTT instance, e.g. by allocating a pool of HTT tx descriptors and |
| 25 | * connecting the HTT service with HTC; and deleting a HTT instance. |
| 26 | */ |
| 27 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 28 | #include <qdf_mem.h> /* qdf_mem_malloc */ |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 29 | #include <qdf_types.h> /* qdf_device_t, qdf_print */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 30 | |
| 31 | #include <htt.h> /* htt_tx_msdu_desc_t */ |
| 32 | #include <ol_cfg.h> |
| 33 | #include <ol_txrx_htt_api.h> /* ol_tx_dowload_done_ll, etc. */ |
| 34 | #include <ol_htt_api.h> |
| 35 | |
| 36 | #include <htt_internal.h> |
Houston Hoffman | 5be9bac | 2015-10-20 17:04:42 -0700 | [diff] [blame] | 37 | #include <ol_htt_tx_api.h> |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 38 | #include <cds_api.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 39 | #include "hif.h" |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 40 | #include <cdp_txrx_handle.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 41 | |
| 42 | #define HTT_HTC_PKT_POOL_INIT_SIZE 100 /* enough for a large A-MPDU */ |
| 43 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 44 | QDF_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev); |
Manjunathappa Prakash | fff753c | 2016-09-01 19:34:56 -0700 | [diff] [blame] | 45 | QDF_STATUS(*htt_h2t_rx_ring_rfs_cfg_msg)(struct htt_pdev_t *pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 46 | |
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_config() - forward the IPA uC resource config to the target
 * @pdev: handle to the HTT instance
 * @status: result of the preceding configuration step
 *
 * Sends the IPA uC resource configuration message only when the previous
 * step succeeded and IPA uC offload is enabled in the pdev config.
 *
 * Return: @status unchanged, or the result of the uC resource cfg message
 */
static QDF_STATUS htt_ipa_config(htt_pdev_handle pdev, QDF_STATUS status)
{
	if (status == QDF_STATUS_SUCCESS &&
	    ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		return htt_h2t_ipa_uc_rsc_cfg_msg(pdev);

	return status;
}

#define HTT_IPA_CONFIG htt_ipa_config
#else
#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
#endif /* IPA_OFFLOAD */
| 60 | |
| 61 | struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev) |
| 62 | { |
| 63 | struct htt_htc_pkt_union *pkt = NULL; |
| 64 | |
| 65 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
| 66 | if (pdev->htt_htc_pkt_freelist) { |
| 67 | pkt = pdev->htt_htc_pkt_freelist; |
| 68 | pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next; |
| 69 | } |
| 70 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
| 71 | |
Jeff Johnson | 6795c3a | 2019-03-18 13:43:04 -0700 | [diff] [blame] | 72 | if (!pkt) |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 73 | pkt = qdf_mem_malloc(sizeof(*pkt)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 74 | |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 75 | if (!pkt) |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 76 | return NULL; |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 77 | |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 78 | htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 79 | return &pkt->u.pkt; /* not actually a dereference */ |
| 80 | } |
| 81 | |
| 82 | void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt) |
| 83 | { |
| 84 | struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt; |
| 85 | |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 86 | if (!u_pkt) { |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 87 | qdf_print("HTC packet is NULL"); |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 88 | return; |
| 89 | } |
| 90 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 91 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 92 | htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 93 | u_pkt->u.next = pdev->htt_htc_pkt_freelist; |
| 94 | pdev->htt_htc_pkt_freelist = u_pkt; |
| 95 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
| 96 | } |
| 97 | |
| 98 | void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev) |
| 99 | { |
| 100 | struct htt_htc_pkt_union *pkt, *next; |
Yun Park | 56e32d9 | 2017-04-04 13:58:17 -0700 | [diff] [blame] | 101 | |
wadesong | 43468c4 | 2017-10-28 07:15:51 +0800 | [diff] [blame] | 102 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 103 | pkt = pdev->htt_htc_pkt_freelist; |
wadesong | 43468c4 | 2017-10-28 07:15:51 +0800 | [diff] [blame] | 104 | pdev->htt_htc_pkt_freelist = NULL; |
| 105 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
| 106 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 107 | while (pkt) { |
| 108 | next = pkt->u.next; |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 109 | qdf_mem_free(pkt); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 110 | pkt = next; |
| 111 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 112 | } |
| 113 | |
| 114 | #ifdef ATH_11AC_TXCOMPACT |
Yun Park | eea1c9c | 2017-03-08 11:26:37 -0800 | [diff] [blame] | 115 | |
| 116 | void |
| 117 | htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level) |
| 118 | { |
| 119 | struct htt_htc_pkt_union *pkt, *next, *prev = NULL; |
| 120 | int i = 0; |
| 121 | qdf_nbuf_t netbuf; |
| 122 | |
| 123 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
| 124 | pkt = pdev->htt_htc_pkt_misclist; |
| 125 | while (pkt) { |
| 126 | next = pkt->u.next; |
| 127 | /* trim the out grown list*/ |
| 128 | if (++i > level) { |
Yun Park | 56e32d9 | 2017-04-04 13:58:17 -0700 | [diff] [blame] | 129 | netbuf = |
| 130 | (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext); |
Yun Park | eea1c9c | 2017-03-08 11:26:37 -0800 | [diff] [blame] | 131 | qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE); |
| 132 | qdf_nbuf_free(netbuf); |
| 133 | qdf_mem_free(pkt); |
| 134 | pkt = NULL; |
| 135 | if (prev) |
| 136 | prev->u.next = NULL; |
| 137 | } |
| 138 | prev = pkt; |
| 139 | pkt = next; |
| 140 | } |
| 141 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
| 142 | } |
| 143 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 144 | void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt) |
| 145 | { |
| 146 | struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt; |
Houston Hoffman | 13f4be5 | 2017-03-14 16:17:03 -0700 | [diff] [blame] | 147 | int misclist_trim_level = htc_get_tx_queue_depth(pdev->htc_pdev, |
| 148 | pkt->htc_pkt.Endpoint) |
| 149 | + HTT_HTC_PKT_MISCLIST_SIZE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 150 | |
| 151 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
| 152 | if (pdev->htt_htc_pkt_misclist) { |
| 153 | u_pkt->u.next = pdev->htt_htc_pkt_misclist; |
| 154 | pdev->htt_htc_pkt_misclist = u_pkt; |
| 155 | } else { |
| 156 | pdev->htt_htc_pkt_misclist = u_pkt; |
| 157 | } |
| 158 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
Yun Park | eea1c9c | 2017-03-08 11:26:37 -0800 | [diff] [blame] | 159 | |
Houston Hoffman | 13f4be5 | 2017-03-14 16:17:03 -0700 | [diff] [blame] | 160 | /* only ce pipe size + tx_queue_depth could possibly be in use |
| 161 | * free older packets in the msiclist |
| 162 | */ |
| 163 | htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 164 | } |
| 165 | |
| 166 | void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev) |
| 167 | { |
| 168 | struct htt_htc_pkt_union *pkt, *next; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 169 | qdf_nbuf_t netbuf; |
Yun Park | 56e32d9 | 2017-04-04 13:58:17 -0700 | [diff] [blame] | 170 | |
wadesong | 43468c4 | 2017-10-28 07:15:51 +0800 | [diff] [blame] | 171 | HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 172 | pkt = pdev->htt_htc_pkt_misclist; |
wadesong | 43468c4 | 2017-10-28 07:15:51 +0800 | [diff] [blame] | 173 | pdev->htt_htc_pkt_misclist = NULL; |
| 174 | HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 175 | |
| 176 | while (pkt) { |
| 177 | next = pkt->u.next; |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 178 | if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) != |
| 179 | HTC_PACKET_MAGIC_COOKIE) { |
wadesong | 43468c4 | 2017-10-28 07:15:51 +0800 | [diff] [blame] | 180 | QDF_ASSERT(0); |
Himanshu Agarwal | 289e40b | 2017-03-08 21:06:20 +0530 | [diff] [blame] | 181 | pkt = next; |
| 182 | continue; |
| 183 | } |
| 184 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 185 | netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext); |
| 186 | qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE); |
| 187 | qdf_nbuf_free(netbuf); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 188 | qdf_mem_free(pkt); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 189 | pkt = next; |
| 190 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 191 | } |
| 192 | #endif |
| 193 | |
Houston Hoffman | 90e24d8 | 2016-04-27 17:15:44 -0700 | [diff] [blame] | 194 | |
| 195 | /* AR6004 don't need HTT layer. */ |
| 196 | #ifdef AR6004_HW |
| 197 | #define NO_HTT_NEEDED true |
| 198 | #else |
| 199 | #define NO_HTT_NEEDED false |
| 200 | #endif |
| 201 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 202 | #if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT) |
| 203 | |
| 204 | /** |
| 205 | * htt_htc_tx_htt2_service_start() - Start TX HTT2 service |
| 206 | * |
| 207 | * @pdev: pointer to htt device. |
| 208 | * @connect_req: pointer to service connection request information |
| 209 | * @connect_resp: pointer to service connection response information |
| 210 | * |
| 211 | * |
| 212 | * Return: None |
| 213 | */ |
| 214 | static void |
| 215 | htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev, |
Manikandan Mohan | 83c939c | 2017-04-13 20:23:07 -0700 | [diff] [blame] | 216 | struct htc_service_connect_req *connect_req, |
| 217 | struct htc_service_connect_resp *connect_resp) |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 218 | { |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 219 | QDF_STATUS status; |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 220 | |
hangtian | 127c953 | 2019-01-12 13:29:07 +0800 | [diff] [blame] | 221 | qdf_mem_zero(connect_req, sizeof(struct htc_service_connect_req)); |
| 222 | qdf_mem_zero(connect_resp, sizeof(struct htc_service_connect_resp)); |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 223 | |
| 224 | /* The same as HTT service but no RX. */ |
| 225 | connect_req->EpCallbacks.pContext = pdev; |
| 226 | connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete; |
| 227 | connect_req->EpCallbacks.EpSendFull = htt_h2t_full; |
| 228 | connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH; |
| 229 | /* Should NOT support credit flow control. */ |
| 230 | connect_req->ConnectionFlags |= |
| 231 | HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; |
| 232 | /* Enable HTC schedule mechanism for TX HTT2 service. */ |
| 233 | connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE; |
| 234 | |
Mohit Khanna | c6f0398 | 2016-05-15 20:37:55 -0700 | [diff] [blame] | 235 | connect_req->service_id = HTT_DATA2_MSG_SVC; |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 236 | |
| 237 | status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp); |
| 238 | |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 239 | if (status != QDF_STATUS_SUCCESS) { |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 240 | pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED; |
| 241 | pdev->htc_tx_htt2_max_size = 0; |
| 242 | } else { |
| 243 | pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint; |
| 244 | pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE; |
| 245 | } |
| 246 | |
| 247 | qdf_print("TX HTT %s, ep %d size %d\n", |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 248 | (status == QDF_STATUS_SUCCESS ? "ON" : "OFF"), |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 249 | pdev->htc_tx_htt2_endpoint, |
| 250 | pdev->htc_tx_htt2_max_size); |
| 251 | } |
| 252 | #else |
| 253 | |
| 254 | static inline void |
| 255 | htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev, |
Manikandan Mohan | 83c939c | 2017-04-13 20:23:07 -0700 | [diff] [blame] | 256 | struct htc_service_connect_req *connect_req, |
| 257 | struct htc_service_connect_resp *connect_resp) |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 258 | { |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 259 | } |
| 260 | #endif |
| 261 | |
| 262 | /** |
| 263 | * htt_htc_credit_flow_disable() - disable flow control for |
| 264 | * HTT data message service |
| 265 | * |
| 266 | * @pdev: pointer to htt device. |
| 267 | * @connect_req: pointer to service connection request information |
| 268 | * |
| 269 | * HTC Credit mechanism is disabled based on |
| 270 | * default_tx_comp_req as throughput will be lower |
| 271 | * if we disable htc credit mechanism with default_tx_comp_req |
| 272 | * set since txrx download packet will be limited by ota |
| 273 | * completion. |
| 274 | * |
| 275 | * Return: None |
| 276 | */ |
| 277 | static |
| 278 | void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev, |
Manikandan Mohan | 83c939c | 2017-04-13 20:23:07 -0700 | [diff] [blame] | 279 | struct htc_service_connect_req *connect_req) |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 280 | { |
| 281 | if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) { |
| 282 | /* |
| 283 | * TODO:Conditional disabling will be removed once firmware |
| 284 | * with reduced tx completion is pushed into release builds. |
| 285 | */ |
| 286 | if (!pdev->cfg.default_tx_comp_req) |
| 287 | connect_req->ConnectionFlags |= |
| 288 | HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; |
| 289 | } else { |
| 290 | connect_req->ConnectionFlags |= |
| 291 | HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; |
| 292 | } |
| 293 | } |
| 294 | |
| 295 | #if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) |
| 296 | |
| 297 | /** |
| 298 | * htt_dump_bundle_stats() - dump wlan stats |
| 299 | * @pdev: handle to the HTT instance |
| 300 | * |
| 301 | * Return: None |
| 302 | */ |
| 303 | void htt_dump_bundle_stats(htt_pdev_handle pdev) |
| 304 | { |
| 305 | htc_dump_bundle_stats(pdev->htc_pdev); |
| 306 | } |
| 307 | |
| 308 | /** |
| 309 | * htt_clear_bundle_stats() - clear wlan stats |
| 310 | * @pdev: handle to the HTT instance |
| 311 | * |
| 312 | * Return: None |
| 313 | */ |
| 314 | void htt_clear_bundle_stats(htt_pdev_handle pdev) |
| 315 | { |
| 316 | htc_clear_bundle_stats(pdev->htc_pdev); |
| 317 | } |
| 318 | #endif |
| 319 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 320 | #if defined(QCA_WIFI_3_0_ADRASTEA) |
| 321 | /** |
| 322 | * htt_htc_attach_all() - Connect to HTC service for HTT |
| 323 | * @pdev: pdev ptr |
| 324 | * |
| 325 | * Return: 0 for success or error code. |
| 326 | */ |
Nirav Shah | 23054cf | 2018-06-21 17:01:10 +0530 | [diff] [blame] | 327 | |
| 328 | #if defined(QCN7605_SUPPORT) && defined(IPA_OFFLOAD) |
| 329 | |
| 330 | /* In case of QCN7605 with IPA offload only 2 CE |
| 331 | * are used for RFS |
| 332 | */ |
| 333 | static int |
| 334 | htt_htc_attach_all(struct htt_pdev_t *pdev) |
| 335 | { |
| 336 | if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC)) |
| 337 | goto flush_endpoint; |
| 338 | |
| 339 | if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC)) |
| 340 | goto flush_endpoint; |
| 341 | |
| 342 | return 0; |
| 343 | |
| 344 | flush_endpoint: |
| 345 | htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL); |
| 346 | |
| 347 | return -EIO; |
| 348 | } |
| 349 | |
| 350 | #else |
| 351 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 352 | static int |
| 353 | htt_htc_attach_all(struct htt_pdev_t *pdev) |
| 354 | { |
| 355 | if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC)) |
Dustin Brown | 95ff87c | 2018-04-04 16:10:00 -0700 | [diff] [blame] | 356 | goto flush_endpoint; |
| 357 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 358 | if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC)) |
Dustin Brown | 95ff87c | 2018-04-04 16:10:00 -0700 | [diff] [blame] | 359 | goto flush_endpoint; |
| 360 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 361 | if (htt_htc_attach(pdev, HTT_DATA3_MSG_SVC)) |
Dustin Brown | 95ff87c | 2018-04-04 16:10:00 -0700 | [diff] [blame] | 362 | goto flush_endpoint; |
| 363 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 364 | return 0; |
Dustin Brown | 95ff87c | 2018-04-04 16:10:00 -0700 | [diff] [blame] | 365 | |
| 366 | flush_endpoint: |
| 367 | htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL); |
| 368 | |
| 369 | return -EIO; |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 370 | } |
Nirav Shah | 23054cf | 2018-06-21 17:01:10 +0530 | [diff] [blame] | 371 | |
| 372 | #endif |
| 373 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 374 | #else |
| 375 | /** |
| 376 | * htt_htc_attach_all() - Connect to HTC service for HTT |
| 377 | * @pdev: pdev ptr |
| 378 | * |
| 379 | * Return: 0 for success or error code. |
| 380 | */ |
| 381 | static int |
| 382 | htt_htc_attach_all(struct htt_pdev_t *pdev) |
| 383 | { |
| 384 | return htt_htc_attach(pdev, HTT_DATA_MSG_SVC); |
| 385 | } |
| 386 | #endif |
| 387 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 388 | /** |
| 389 | * htt_pdev_alloc() - allocate HTT pdev |
| 390 | * @txrx_pdev: txrx pdev |
| 391 | * @ctrl_pdev: cfg pdev |
| 392 | * @htc_pdev: HTC pdev |
| 393 | * @osdev: os device |
| 394 | * |
| 395 | * Return: HTT pdev handle |
| 396 | */ |
| 397 | htt_pdev_handle |
| 398 | htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev, |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 399 | struct cdp_cfg *ctrl_pdev, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 400 | HTC_HANDLE htc_pdev, qdf_device_t osdev) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 401 | { |
| 402 | struct htt_pdev_t *pdev; |
Houston Hoffman | 90e24d8 | 2016-04-27 17:15:44 -0700 | [diff] [blame] | 403 | struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF); |
| 404 | |
| 405 | if (!osc) |
| 406 | goto fail1; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 407 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 408 | pdev = qdf_mem_malloc(sizeof(*pdev)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 409 | if (!pdev) |
| 410 | goto fail1; |
| 411 | |
| 412 | pdev->osdev = osdev; |
| 413 | pdev->ctrl_pdev = ctrl_pdev; |
| 414 | pdev->txrx_pdev = txrx_pdev; |
| 415 | pdev->htc_pdev = htc_pdev; |
| 416 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 417 | pdev->htt_htc_pkt_freelist = NULL; |
| 418 | #ifdef ATH_11AC_TXCOMPACT |
| 419 | pdev->htt_htc_pkt_misclist = NULL; |
| 420 | #endif |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 421 | |
| 422 | /* for efficiency, store a local copy of the is_high_latency flag */ |
| 423 | pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev); |
Ajit Pal Singh | c31d101 | 2018-06-07 19:47:22 +0530 | [diff] [blame] | 424 | /* |
| 425 | * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND |
| 426 | * enabled or not. |
| 427 | */ |
| 428 | pdev->cfg.credit_update_enabled = |
| 429 | ol_cfg_is_credit_update_enabled(pdev->ctrl_pdev); |
| 430 | |
| 431 | pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() || |
| 432 | cds_is_packet_log_enabled(); |
| 433 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 434 | pdev->cfg.default_tx_comp_req = |
| 435 | !ol_cfg_tx_free_at_download(pdev->ctrl_pdev); |
| 436 | |
| 437 | pdev->cfg.is_full_reorder_offload = |
| 438 | ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev); |
Manjunathappa Prakash | 1bc742d | 2018-08-14 18:13:43 -0700 | [diff] [blame] | 439 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW, |
Rajeev Kumar | b4b7f5c | 2018-01-18 14:49:54 -0800 | [diff] [blame] | 440 | "full_reorder_offloaded %d", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 441 | (int)pdev->cfg.is_full_reorder_offload); |
| 442 | |
| 443 | pdev->cfg.ce_classify_enabled = |
| 444 | ol_cfg_is_ce_classify_enabled(ctrl_pdev); |
Manjunathappa Prakash | 1bc742d | 2018-08-14 18:13:43 -0700 | [diff] [blame] | 445 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW, |
Rajeev Kumar | b4b7f5c | 2018-01-18 14:49:54 -0800 | [diff] [blame] | 446 | "ce_classify %d", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 447 | pdev->cfg.ce_classify_enabled); |
| 448 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 449 | if (pdev->cfg.is_high_latency) { |
| 450 | qdf_atomic_init(&pdev->htt_tx_credit.target_delta); |
| 451 | qdf_atomic_init(&pdev->htt_tx_credit.bus_delta); |
| 452 | qdf_atomic_add(HTT_MAX_BUS_CREDIT, |
| 453 | &pdev->htt_tx_credit.bus_delta); |
| 454 | } |
| 455 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 456 | pdev->targetdef = htc_get_targetdef(htc_pdev); |
Himanshu Agarwal | 18d6b8c | 2017-03-01 16:41:04 +0530 | [diff] [blame] | 457 | #if defined(HELIUMPLUS) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 458 | HTT_SET_WIFI_IP(pdev, 2, 0); |
Himanshu Agarwal | 18d6b8c | 2017-03-01 16:41:04 +0530 | [diff] [blame] | 459 | #endif /* defined(HELIUMPLUS) */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 460 | |
Houston Hoffman | 90e24d8 | 2016-04-27 17:15:44 -0700 | [diff] [blame] | 461 | if (NO_HTT_NEEDED) |
| 462 | goto success; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 463 | /* |
| 464 | * Connect to HTC service. |
| 465 | * This has to be done before calling htt_rx_attach, |
| 466 | * since htt_rx_attach involves sending a rx ring configure |
| 467 | * message to the target. |
| 468 | */ |
wadesong | eda4091 | 2017-09-14 14:58:34 +0800 | [diff] [blame] | 469 | HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex); |
| 470 | HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev); |
| 471 | HTT_TX_MUTEX_INIT(&pdev->credit_mutex); |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 472 | if (htt_htc_attach_all(pdev)) |
| 473 | goto htt_htc_attach_fail; |
Houston Hoffman | 90e24d8 | 2016-04-27 17:15:44 -0700 | [diff] [blame] | 474 | if (hif_ce_fastpath_cb_register(osc, htt_t2h_msg_handler_fast, pdev)) |
Manjunathappa Prakash | 585178d | 2016-04-14 01:11:18 -0700 | [diff] [blame] | 475 | qdf_print("failed to register fastpath callback\n"); |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 476 | |
Houston Hoffman | 90e24d8 | 2016-04-27 17:15:44 -0700 | [diff] [blame] | 477 | success: |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 478 | return pdev; |
| 479 | |
Govind Singh | 9785416 | 2017-03-20 11:39:37 +0530 | [diff] [blame] | 480 | htt_htc_attach_fail: |
wadesong | eda4091 | 2017-09-14 14:58:34 +0800 | [diff] [blame] | 481 | HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex); |
| 482 | HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex); |
| 483 | HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 484 | qdf_mem_free(pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 485 | |
| 486 | fail1: |
| 487 | return NULL; |
| 488 | |
| 489 | } |
| 490 | |
| 491 | /** |
| 492 | * htt_attach() - Allocate and setup HTT TX/RX descriptors |
| 493 | * @pdev: pdev ptr |
| 494 | * @desc_pool_size: size of tx descriptors |
| 495 | * |
| 496 | * Return: 0 for success or error code. |
| 497 | */ |
| 498 | int |
| 499 | htt_attach(struct htt_pdev_t *pdev, int desc_pool_size) |
| 500 | { |
| 501 | int i; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 502 | int ret = 0; |
| 503 | |
Sravan Kumar Kairam | 4329c5f | 2018-03-02 11:26:29 +0530 | [diff] [blame] | 504 | pdev->is_ipa_uc_enabled = false; |
| 505 | if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) |
| 506 | pdev->is_ipa_uc_enabled = true; |
| 507 | |
Rakshith Suresh Patkar | 0f6375c | 2018-12-04 20:59:07 +0530 | [diff] [blame] | 508 | pdev->new_htt_format_enabled = false; |
| 509 | if (ol_cfg_is_htt_new_format_enabled(pdev->ctrl_pdev)) |
| 510 | pdev->new_htt_format_enabled = true; |
| 511 | |
| 512 | htc_enable_hdr_length_check(pdev->htc_pdev, |
| 513 | pdev->new_htt_format_enabled); |
| 514 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 515 | ret = htt_tx_attach(pdev, desc_pool_size); |
| 516 | if (ret) |
| 517 | goto fail1; |
| 518 | |
| 519 | ret = htt_rx_attach(pdev); |
| 520 | if (ret) |
| 521 | goto fail2; |
| 522 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 523 | /* pre-allocate some HTC_PACKET objects */ |
| 524 | for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { |
| 525 | struct htt_htc_pkt_union *pkt; |
Yun Park | 56e32d9 | 2017-04-04 13:58:17 -0700 | [diff] [blame] | 526 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 527 | pkt = qdf_mem_malloc(sizeof(*pkt)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 528 | if (!pkt) |
| 529 | break; |
| 530 | htt_htc_pkt_free(pdev, &pkt->u.pkt); |
| 531 | } |
| 532 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 533 | if (pdev->cfg.is_high_latency) { |
| 534 | /* |
| 535 | * HL - download the whole frame. |
| 536 | * Specify a download length greater than the max MSDU size, |
| 537 | * so the downloads will be limited by the actual frame sizes. |
| 538 | */ |
| 539 | pdev->download_len = 5000; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 540 | |
Ajit Pal Singh | c31d101 | 2018-06-07 19:47:22 +0530 | [diff] [blame] | 541 | if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev) && |
| 542 | !pdev->cfg.request_tx_comp) |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 543 | pdev->tx_send_complete_part2 = |
| 544 | ol_tx_download_done_hl_free; |
| 545 | else |
| 546 | pdev->tx_send_complete_part2 = |
| 547 | ol_tx_download_done_hl_retain; |
| 548 | |
| 549 | /* |
| 550 | * CHECK THIS LATER: does the HL HTT version of |
| 551 | * htt_rx_mpdu_desc_list_next |
| 552 | * (which is not currently implemented) present the |
| 553 | * adf_nbuf_data(rx_ind_msg) |
| 554 | * as the abstract rx descriptor? |
| 555 | * If not, the rx_fw_desc_offset initialization |
| 556 | * here will have to be adjusted accordingly. |
| 557 | * NOTE: for HL, because fw rx desc is in ind msg, |
| 558 | * not in rx desc, so the |
| 559 | * offset should be negtive value |
| 560 | */ |
| 561 | pdev->rx_fw_desc_offset = |
| 562 | HTT_ENDIAN_BYTE_IDX_SWAP( |
| 563 | HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET |
| 564 | - HTT_RX_IND_HL_BYTES); |
| 565 | |
| 566 | htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl; |
Manjunathappa Prakash | fff753c | 2016-09-01 19:34:56 -0700 | [diff] [blame] | 567 | htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_hl; |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 568 | |
| 569 | /* initialize the txrx credit count */ |
| 570 | ol_tx_target_credit_update( |
| 571 | pdev->txrx_pdev, ol_cfg_target_tx_credit( |
| 572 | pdev->ctrl_pdev)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 573 | } else { |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 574 | enum wlan_frm_fmt frm_type; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 575 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 576 | /* |
| 577 | * LL - download just the initial portion of the frame. |
| 578 | * Download enough to cover the encapsulation headers checked |
| 579 | * by the target's tx classification descriptor engine. |
| 580 | * |
| 581 | * For LL, the FW rx desc directly referenced at its location |
| 582 | * inside the rx indication message. |
| 583 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 584 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 585 | /* account for the 802.3 or 802.11 header */ |
| 586 | frm_type = ol_cfg_frame_type(pdev->ctrl_pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 587 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 588 | if (frm_type == wlan_frm_fmt_native_wifi) { |
| 589 | pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI; |
| 590 | } else if (frm_type == wlan_frm_fmt_802_3) { |
| 591 | pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET; |
| 592 | } else { |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 593 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 594 | "Unexpected frame type spec: %d", frm_type); |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 595 | HTT_ASSERT0(0); |
| 596 | } |
| 597 | |
| 598 | /* |
| 599 | * Account for the optional L2 / ethernet header fields: |
| 600 | * 802.1Q, LLC/SNAP |
| 601 | */ |
| 602 | pdev->download_len += |
| 603 | HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP; |
| 604 | |
| 605 | /* |
| 606 | * Account for the portion of the L3 (IP) payload that the |
| 607 | * target needs for its tx classification. |
| 608 | */ |
| 609 | pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev); |
| 610 | |
| 611 | /* |
| 612 | * Account for the HTT tx descriptor, including the |
| 613 | * HTC header + alignment padding. |
| 614 | */ |
| 615 | pdev->download_len += sizeof(struct htt_host_tx_desc_t); |
| 616 | |
| 617 | /* |
| 618 | * The TXCOMPACT htt_tx_sched function uses pdev->download_len |
| 619 | * to apply for all requeued tx frames. Thus, |
| 620 | * pdev->download_len has to be the largest download length of |
| 621 | * any tx frame that will be downloaded. |
| 622 | * This maximum download length is for management tx frames, |
| 623 | * which have an 802.11 header. |
| 624 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 625 | #ifdef ATH_11AC_TXCOMPACT |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 626 | pdev->download_len = sizeof(struct htt_host_tx_desc_t) |
| 627 | + HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */ |
| 628 | + HTT_TX_HDR_SIZE_802_1Q |
| 629 | + HTT_TX_HDR_SIZE_LLC_SNAP |
| 630 | + ol_cfg_tx_download_size(pdev->ctrl_pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 631 | #endif |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 632 | pdev->tx_send_complete_part2 = ol_tx_download_done_ll; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 633 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 634 | /* |
| 635 | * For LL, the FW rx desc is alongside the HW rx desc fields in |
| 636 | * the htt_host_rx_desc_base struct/. |
| 637 | */ |
| 638 | pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 639 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 640 | htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll; |
Manjunathappa Prakash | fff753c | 2016-09-01 19:34:56 -0700 | [diff] [blame] | 641 | htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_ll; |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 642 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 643 | |
| 644 | return 0; |
| 645 | |
| 646 | fail2: |
| 647 | htt_tx_detach(pdev); |
| 648 | |
| 649 | fail1: |
| 650 | return ret; |
| 651 | } |
| 652 | |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 653 | QDF_STATUS htt_attach_target(htt_pdev_handle pdev) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 654 | { |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 655 | QDF_STATUS status; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 656 | |
| 657 | status = htt_h2t_ver_req_msg(pdev); |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 658 | if (status != QDF_STATUS_SUCCESS) { |
| 659 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 660 | "%s:%d: could not send h2t_ver_req msg", |
| 661 | __func__, __LINE__); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 662 | return status; |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 663 | } |
Himanshu Agarwal | 18d6b8c | 2017-03-01 16:41:04 +0530 | [diff] [blame] | 664 | #if defined(HELIUMPLUS) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 665 | /* |
| 666 | * Send the frag_desc info to target. |
| 667 | */ |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 668 | status = htt_h2t_frag_desc_bank_cfg_msg(pdev); |
| 669 | if (status != QDF_STATUS_SUCCESS) { |
| 670 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 671 | "%s:%d: could not send h2t_frag_desc_bank_cfg msg", |
| 672 | __func__, __LINE__); |
| 673 | return status; |
| 674 | } |
Himanshu Agarwal | 18d6b8c | 2017-03-01 16:41:04 +0530 | [diff] [blame] | 675 | #endif /* defined(HELIUMPLUS) */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 676 | |
| 677 | |
| 678 | /* |
| 679 | * If applicable, send the rx ring config message to the target. |
| 680 | * The host could wait for the HTT version number confirmation message |
| 681 | * from the target before sending any further HTT messages, but it's |
| 682 | * reasonable to assume that the host and target HTT version numbers |
| 683 | * match, and proceed immediately with the remaining configuration |
| 684 | * handshaking. |
| 685 | */ |
| 686 | |
Manjunathappa Prakash | fff753c | 2016-09-01 19:34:56 -0700 | [diff] [blame] | 687 | status = htt_h2t_rx_ring_rfs_cfg_msg(pdev); |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 688 | if (status != QDF_STATUS_SUCCESS) { |
| 689 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 690 | "%s:%d: could not send h2t_rx_ring_rfs_cfg msg", |
| 691 | __func__, __LINE__); |
| 692 | return status; |
| 693 | } |
| 694 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 695 | status = htt_h2t_rx_ring_cfg_msg(pdev); |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 696 | if (status != QDF_STATUS_SUCCESS) { |
| 697 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 698 | "%s:%d: could not send h2t_rx_ring_cfg msg", |
| 699 | __func__, __LINE__); |
| 700 | return status; |
| 701 | } |
| 702 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 703 | status = HTT_IPA_CONFIG(pdev, status); |
Orhan K AKYILDIZ | a4f58e9 | 2017-06-21 13:36:10 -0700 | [diff] [blame] | 704 | if (status != QDF_STATUS_SUCCESS) { |
| 705 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 706 | "%s:%d: could not send h2t_ipa_uc_rsc_cfg msg", |
| 707 | __func__, __LINE__); |
| 708 | return status; |
| 709 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 710 | |
| 711 | return status; |
| 712 | } |
| 713 | |
/**
 * htt_detach() - tear down the host-side resources of a HTT instance
 * @pdev: handle to the HTT instance being detached
 *
 * Releases the rx and tx side resources, frees the HTC packet pool(s),
 * and destroys the mutexes owned by the HTT instance.  The pdev struct
 * itself is freed separately via htt_pdev_free().
 *
 * Return: none
 */
void htt_detach(htt_pdev_handle pdev)
{
	htt_rx_detach(pdev);
	htt_tx_detach(pdev);
	htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
	htt_htc_misc_pkt_pool_free(pdev);
#endif
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}
| 726 | |
| 727 | /** |
| 728 | * htt_pdev_free() - Free HTT pdev |
| 729 | * @pdev: htt pdev |
| 730 | * |
| 731 | * Return: none |
| 732 | */ |
| 733 | void htt_pdev_free(htt_pdev_handle pdev) |
| 734 | { |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 735 | qdf_mem_free(pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 736 | } |
| 737 | |
/**
 * htt_detach_target() - target-side counterpart of htt_attach_target()
 * @pdev: handle to the HTT instance (unused)
 *
 * Intentionally a no-op in this implementation; kept so the attach/detach
 * API stays symmetric.
 *
 * Return: none
 */
void htt_detach_target(htt_pdev_handle pdev)
{
}
| 741 | |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 742 | static inline |
| 743 | int htt_update_endpoint(struct htt_pdev_t *pdev, |
| 744 | uint16_t service_id, HTC_ENDPOINT_ID ep) |
| 745 | { |
| 746 | struct hif_opaque_softc *hif_ctx; |
| 747 | uint8_t ul = 0xff, dl = 0xff; |
| 748 | int ul_polled, dl_polled; |
| 749 | int tx_service = 0; |
| 750 | int rc = 0; |
| 751 | |
| 752 | hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); |
Jeff Johnson | 6795c3a | 2019-03-18 13:43:04 -0700 | [diff] [blame] | 753 | if (qdf_unlikely(!hif_ctx)) { |
| 754 | QDF_ASSERT(hif_ctx); |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 755 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 756 | "%s:%d: assuming non-tx service.", |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 757 | __func__, __LINE__); |
| 758 | } else { |
| 759 | ul = dl = 0xff; |
| 760 | if (QDF_STATUS_SUCCESS != |
| 761 | hif_map_service_to_pipe(hif_ctx, service_id, |
| 762 | &ul, &dl, |
| 763 | &ul_polled, &dl_polled)) |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 764 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, |
| 765 | "%s:%d: assuming non-tx srv.", |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 766 | __func__, __LINE__); |
| 767 | else |
| 768 | tx_service = (ul != 0xff); |
| 769 | } |
| 770 | if (tx_service) { |
| 771 | /* currently we have only one OUT htt tx service */ |
| 772 | QDF_BUG(service_id == HTT_DATA_MSG_SVC); |
| 773 | |
| 774 | pdev->htc_tx_endpoint = ep; |
Nirav Shah | 24e697f | 2016-04-22 10:49:45 +0530 | [diff] [blame] | 775 | hif_save_htc_htt_config_endpoint(hif_ctx, ep); |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 776 | rc = 1; |
| 777 | } |
Nirav Shah | 24e697f | 2016-04-22 10:49:45 +0530 | [diff] [blame] | 778 | return rc; |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 779 | } |
| 780 | |
| 781 | int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 782 | { |
Manikandan Mohan | 83c939c | 2017-04-13 20:23:07 -0700 | [diff] [blame] | 783 | struct htc_service_connect_req connect; |
| 784 | struct htc_service_connect_resp response; |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 785 | QDF_STATUS status; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 786 | |
hangtian | 127c953 | 2019-01-12 13:29:07 +0800 | [diff] [blame] | 787 | qdf_mem_zero(&connect, sizeof(connect)); |
| 788 | qdf_mem_zero(&response, sizeof(response)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 789 | |
| 790 | connect.pMetaData = NULL; |
| 791 | connect.MetaDataLength = 0; |
| 792 | connect.EpCallbacks.pContext = pdev; |
| 793 | connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete; |
| 794 | connect.EpCallbacks.EpTxCompleteMultiple = NULL; |
| 795 | connect.EpCallbacks.EpRecv = htt_t2h_msg_handler; |
Houston Hoffman | 5be9bac | 2015-10-20 17:04:42 -0700 | [diff] [blame] | 796 | connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler; |
Visweswara Tanuku | 2e839e5 | 2019-06-11 10:16:30 +0530 | [diff] [blame^] | 797 | connect.EpCallbacks.ep_padding_credit_update = |
| 798 | htt_tx_padding_credit_update_handler; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 799 | |
| 800 | /* rx buffers currently are provided by HIF, not by EpRecvRefill */ |
| 801 | connect.EpCallbacks.EpRecvRefill = NULL; |
| 802 | connect.EpCallbacks.RecvRefillWaterMark = 1; |
| 803 | /* N/A, fill is done by HIF */ |
| 804 | |
| 805 | connect.EpCallbacks.EpSendFull = htt_h2t_full; |
| 806 | /* |
| 807 | * Specify how deep to let a queue get before htc_send_pkt will |
| 808 | * call the EpSendFull function due to excessive send queue depth. |
| 809 | */ |
| 810 | connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH; |
| 811 | |
| 812 | /* disable flow control for HTT data message service */ |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 813 | htt_htc_credit_flow_disable(pdev, &connect); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 814 | |
| 815 | /* connect to control service */ |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 816 | connect.service_id = service_id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 817 | |
| 818 | status = htc_connect_service(pdev->htc_pdev, &connect, &response); |
| 819 | |
Rakesh Pillai | 7fb7a1f | 2017-06-23 14:46:36 +0530 | [diff] [blame] | 820 | if (status != QDF_STATUS_SUCCESS) { |
Houston Hoffman | fa60ff5 | 2017-08-22 18:50:14 -0700 | [diff] [blame] | 821 | if (cds_is_fw_down()) |
| 822 | return -EIO; |
Nachiket Kukade | 8003d25 | 2017-03-30 15:55:58 +0530 | [diff] [blame] | 823 | |
Houston Hoffman | fa60ff5 | 2017-08-22 18:50:14 -0700 | [diff] [blame] | 824 | if (status == QDF_STATUS_E_NOMEM || |
| 825 | cds_is_self_recovery_enabled()) |
| 826 | return qdf_status_to_os_return(status); |
| 827 | |
| 828 | QDF_BUG(0); |
Nachiket Kukade | 8003d25 | 2017-03-30 15:55:58 +0530 | [diff] [blame] | 829 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 830 | |
Houston Hoffman | 23e76f9 | 2016-02-26 12:19:11 -0800 | [diff] [blame] | 831 | htt_update_endpoint(pdev, service_id, response.Endpoint); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 832 | |
Siddarth Poddar | 1df1cd8 | 2016-04-27 17:32:21 +0530 | [diff] [blame] | 833 | /* Start TX HTT2 service if the target support it. */ |
| 834 | htt_htc_tx_htt2_service_start(pdev, &connect, &response); |
| 835 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 836 | return 0; /* success */ |
| 837 | } |
| 838 | |
Poddar, Siddarth | b904759 | 2017-10-05 15:48:28 +0530 | [diff] [blame] | 839 | void htt_log_rx_ring_info(htt_pdev_handle pdev) |
| 840 | { |
| 841 | if (!pdev) { |
| 842 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 843 | "%s: htt pdev is NULL", __func__); |
| 844 | return; |
| 845 | } |
| 846 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, |
| 847 | "%s: Data Stall Detected with reason 4 (=FW_RX_REFILL_FAILED)." |
| 848 | "src htt rx ring: space for %d elements, filled with %d buffers, buffers in the ring %d, refill debt %d", |
| 849 | __func__, pdev->rx_ring.size, pdev->rx_ring.fill_level, |
| 850 | pdev->rx_ring.fill_cnt, |
| 851 | qdf_atomic_read(&pdev->rx_ring.refill_debt)); |
| 852 | } |
| 853 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 854 | #if HTT_DEBUG_LEVEL > 5 |
| 855 | void htt_display(htt_pdev_handle pdev, int indent) |
| 856 | { |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 857 | qdf_print("%*s%s:\n", indent, " ", "HTT"); |
| 858 | qdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 859 | indent + 4, " ", |
| 860 | pdev->tx_descs.pool_elems, |
| 861 | pdev->tx_descs.size, pdev->tx_descs.alloc_cnt); |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 862 | qdf_print("%*srx ring: space for %d elems, filled with %d buffers\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 863 | indent + 4, " ", |
| 864 | pdev->rx_ring.size, pdev->rx_ring.fill_level); |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 865 | qdf_print("%*sat %pK (%llx paddr)\n", indent + 8, " ", |
Orhan K AKYILDIZ | 5a36de3 | 2016-08-06 19:43:33 -0700 | [diff] [blame] | 866 | pdev->rx_ring.buf.paddrs_ring, |
| 867 | (unsigned long long)pdev->rx_ring.base_paddr); |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 868 | qdf_print("%*snetbuf ring @ %pK\n", indent + 8, " ", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 869 | pdev->rx_ring.buf.netbufs_ring); |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 870 | qdf_print("%*sFW_IDX shadow register: vaddr = %pK, paddr = %llx\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 871 | indent + 8, " ", |
Orhan K AKYILDIZ | 5a36de3 | 2016-08-06 19:43:33 -0700 | [diff] [blame] | 872 | pdev->rx_ring.alloc_idx.vaddr, |
| 873 | (unsigned long long)pdev->rx_ring.alloc_idx.paddr); |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 874 | qdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 875 | indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr, |
| 876 | pdev->rx_ring.sw_rd_idx.msdu_desc, |
| 877 | pdev->rx_ring.sw_rd_idx.msdu_payld); |
| 878 | } |
| 879 | #endif |
| 880 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 881 | #ifdef IPA_OFFLOAD |
Leo Chang | 8e07361 | 2015-11-13 10:55:34 -0800 | [diff] [blame] | 882 | /** |
| 883 | * htt_ipa_uc_attach() - Allocate UC data path resources |
| 884 | * @pdev: handle to the HTT instance |
| 885 | * |
| 886 | * Return: 0 success |
| 887 | * none 0 fail |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 888 | */ |
| 889 | int htt_ipa_uc_attach(struct htt_pdev_t *pdev) |
| 890 | { |
| 891 | int error; |
| 892 | |
Rajeev Kumar | 3887f9b | 2018-01-10 11:24:01 -0800 | [diff] [blame] | 893 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter", |
Yun Park | 199c2ed | 2017-10-02 11:24:22 -0700 | [diff] [blame] | 894 | __func__); |
| 895 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 896 | /* TX resource attach */ |
| 897 | error = htt_tx_ipa_uc_attach( |
| 898 | pdev, |
| 899 | ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev), |
| 900 | ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev), |
| 901 | ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev)); |
| 902 | if (error) { |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 903 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 904 | "HTT IPA UC TX attach fail code %d", error); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 905 | HTT_ASSERT0(0); |
| 906 | return error; |
| 907 | } |
| 908 | |
| 909 | /* RX resource attach */ |
| 910 | error = htt_rx_ipa_uc_attach( |
Leo Chang | bc24e61 | 2016-07-05 17:19:55 -0700 | [diff] [blame] | 911 | pdev, qdf_get_pwr2(pdev->rx_ring.fill_level)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 912 | if (error) { |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 913 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 914 | "HTT IPA UC RX attach fail code %d", error); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 915 | htt_tx_ipa_uc_detach(pdev); |
| 916 | HTT_ASSERT0(0); |
| 917 | return error; |
| 918 | } |
| 919 | |
Rajeev Kumar | 3887f9b | 2018-01-10 11:24:01 -0800 | [diff] [blame] | 920 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit", |
Yun Park | 199c2ed | 2017-10-02 11:24:22 -0700 | [diff] [blame] | 921 | __func__); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 922 | return 0; /* success */ |
| 923 | } |
| 924 | |
Leo Chang | 8e07361 | 2015-11-13 10:55:34 -0800 | [diff] [blame] | 925 | /** |
| 926 | * htt_ipa_uc_attach() - Remove UC data path resources |
| 927 | * @pdev: handle to the HTT instance |
| 928 | * |
| 929 | * Return: None |
| 930 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 931 | void htt_ipa_uc_detach(struct htt_pdev_t *pdev) |
| 932 | { |
Yun Park | e423980 | 2018-01-09 11:01:40 -0800 | [diff] [blame] | 933 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter", |
Yun Park | 199c2ed | 2017-10-02 11:24:22 -0700 | [diff] [blame] | 934 | __func__); |
| 935 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 936 | /* TX IPA micro controller detach */ |
| 937 | htt_tx_ipa_uc_detach(pdev); |
| 938 | |
| 939 | /* RX IPA micro controller detach */ |
| 940 | htt_rx_ipa_uc_detach(pdev); |
Yun Park | 199c2ed | 2017-10-02 11:24:22 -0700 | [diff] [blame] | 941 | |
Yun Park | e423980 | 2018-01-09 11:01:40 -0800 | [diff] [blame] | 942 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit", |
Yun Park | 199c2ed | 2017-10-02 11:24:22 -0700 | [diff] [blame] | 943 | __func__); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 944 | } |
| 945 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 946 | int |
| 947 | htt_ipa_uc_get_resource(htt_pdev_handle pdev, |
Sravan Kumar Kairam | b664b6c | 2018-02-27 17:43:10 +0530 | [diff] [blame] | 948 | qdf_shared_mem_t **ce_sr, |
| 949 | qdf_shared_mem_t **tx_comp_ring, |
| 950 | qdf_shared_mem_t **rx_rdy_ring, |
| 951 | qdf_shared_mem_t **rx2_rdy_ring, |
| 952 | qdf_shared_mem_t **rx_proc_done_idx, |
| 953 | qdf_shared_mem_t **rx2_proc_done_idx, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 954 | uint32_t *ce_sr_ring_size, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 955 | qdf_dma_addr_t *ce_reg_paddr, |
Sravan Kumar Kairam | b664b6c | 2018-02-27 17:43:10 +0530 | [diff] [blame] | 956 | uint32_t *tx_num_alloc_buffer) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 957 | { |
| 958 | /* Release allocated resource to client */ |
Sravan Kumar Kairam | b664b6c | 2018-02-27 17:43:10 +0530 | [diff] [blame] | 959 | *tx_comp_ring = pdev->ipa_uc_tx_rsc.tx_comp_ring; |
| 960 | *rx_rdy_ring = pdev->ipa_uc_rx_rsc.rx_ind_ring; |
| 961 | *rx2_rdy_ring = pdev->ipa_uc_rx_rsc.rx2_ind_ring; |
| 962 | *rx_proc_done_idx = pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx; |
| 963 | *rx2_proc_done_idx = pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx; |
| 964 | *tx_num_alloc_buffer = (uint32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 965 | |
| 966 | /* Get copy engine, bus resource */ |
Sravan Kumar Kairam | b664b6c | 2018-02-27 17:43:10 +0530 | [diff] [blame] | 967 | htc_ipa_get_ce_resource(pdev->htc_pdev, ce_sr, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 968 | ce_sr_ring_size, ce_reg_paddr); |
| 969 | |
| 970 | return 0; |
| 971 | } |
| 972 | |
Leo Chang | 8e07361 | 2015-11-13 10:55:34 -0800 | [diff] [blame] | 973 | /** |
| 974 | * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address |
| 975 | * @pdev: handle to the HTT instance |
| 976 | * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address |
| 977 | * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address |
| 978 | * |
| 979 | * Return: 0 success |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 980 | */ |
| 981 | int |
| 982 | htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 983 | qdf_dma_addr_t ipa_uc_tx_doorbell_paddr, |
| 984 | qdf_dma_addr_t ipa_uc_rx_doorbell_paddr) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 985 | { |
| 986 | pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr; |
| 987 | pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr; |
| 988 | return 0; |
| 989 | } |
| 990 | #endif /* IPA_OFFLOAD */ |
Himanshu Agarwal | 19141bb | 2016-07-20 20:15:48 +0530 | [diff] [blame] | 991 | |
| 992 | /** |
| 993 | * htt_mark_first_wakeup_packet() - set flag to indicate that |
| 994 | * fw is compatible for marking first packet after wow wakeup |
| 995 | * @pdev: pointer to htt pdev |
| 996 | * @value: 1 for enabled/ 0 for disabled |
| 997 | * |
| 998 | * Return: None |
| 999 | */ |
| 1000 | void htt_mark_first_wakeup_packet(htt_pdev_handle pdev, |
| 1001 | uint8_t value) |
| 1002 | { |
| 1003 | if (!pdev) { |
Poddar, Siddarth | 1626447 | 2017-03-14 19:39:43 +0530 | [diff] [blame] | 1004 | QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR, |
| 1005 | "%s: htt pdev is NULL", __func__); |
Himanshu Agarwal | 19141bb | 2016-07-20 20:15:48 +0530 | [diff] [blame] | 1006 | return; |
| 1007 | } |
| 1008 | |
| 1009 | pdev->cfg.is_first_wakeup_packet = value; |
| 1010 | } |
| 1011 | |