blob: 8481221dd933f5c2431193718f52ba061bb1ef53 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
hangtian127c9532019-01-12 13:29:07 +08002 * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019/**
20 * @file htt.c
21 * @brief Provide functions to create+init and destroy a HTT instance.
22 * @details
23 * This file contains functions for creating a HTT instance; initializing
24 * the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
25 * connecting the HTT service with HTC; and deleting a HTT instance.
26 */
27
Anurag Chouhan600c3a02016-03-01 10:33:54 +053028#include <qdf_mem.h> /* qdf_mem_malloc */
Anurag Chouhan6d760662016-02-20 16:05:43 +053029#include <qdf_types.h> /* qdf_device_t, qdf_print */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080030
31#include <htt.h> /* htt_tx_msdu_desc_t */
32#include <ol_cfg.h>
33#include <ol_txrx_htt_api.h> /* ol_tx_dowload_done_ll, etc. */
34#include <ol_htt_api.h>
35
36#include <htt_internal.h>
Houston Hoffman5be9bac2015-10-20 17:04:42 -070037#include <ol_htt_tx_api.h>
Houston Hoffman23e76f92016-02-26 12:19:11 -080038#include <cds_api.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080039#include "hif.h"
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080040#include <cdp_txrx_handle.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080041
42#define HTT_HTC_PKT_POOL_INIT_SIZE 100 /* enough for a large A-MPDU */
43
Siddarth Poddar1df1cd82016-04-27 17:32:21 +053044QDF_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
Manjunathappa Prakashfff753c2016-09-01 19:34:56 -070045QDF_STATUS(*htt_h2t_rx_ring_rfs_cfg_msg)(struct htt_pdev_t *pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080046
47#ifdef IPA_OFFLOAD
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +053048static QDF_STATUS htt_ipa_config(htt_pdev_handle pdev, QDF_STATUS status)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080049{
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +053050 if ((QDF_STATUS_SUCCESS == status) &&
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080051 ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
52 status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
53 return status;
54}
55
56#define HTT_IPA_CONFIG htt_ipa_config
57#else
58#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
59#endif /* IPA_OFFLOAD */
60
61struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
62{
63 struct htt_htc_pkt_union *pkt = NULL;
64
65 HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
66 if (pdev->htt_htc_pkt_freelist) {
67 pkt = pdev->htt_htc_pkt_freelist;
68 pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
69 }
70 HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
71
Jeff Johnson6795c3a2019-03-18 13:43:04 -070072 if (!pkt)
Anurag Chouhan600c3a02016-03-01 10:33:54 +053073 pkt = qdf_mem_malloc(sizeof(*pkt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080074
Nirav Shah7c8c1712018-09-10 16:01:31 +053075 if (!pkt)
Himanshu Agarwal289e40b2017-03-08 21:06:20 +053076 return NULL;
Nirav Shah7c8c1712018-09-10 16:01:31 +053077
Himanshu Agarwal289e40b2017-03-08 21:06:20 +053078 htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080079 return &pkt->u.pkt; /* not actually a dereference */
80}
81
82void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
83{
84 struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
85
Himanshu Agarwal289e40b2017-03-08 21:06:20 +053086 if (!u_pkt) {
Nirav Shah7c8c1712018-09-10 16:01:31 +053087 qdf_print("HTC packet is NULL");
Himanshu Agarwal289e40b2017-03-08 21:06:20 +053088 return;
89 }
90
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080091 HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
Himanshu Agarwal289e40b2017-03-08 21:06:20 +053092 htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080093 u_pkt->u.next = pdev->htt_htc_pkt_freelist;
94 pdev->htt_htc_pkt_freelist = u_pkt;
95 HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
96}
97
98void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
99{
100 struct htt_htc_pkt_union *pkt, *next;
Yun Park56e32d92017-04-04 13:58:17 -0700101
wadesong43468c42017-10-28 07:15:51 +0800102 HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800103 pkt = pdev->htt_htc_pkt_freelist;
wadesong43468c42017-10-28 07:15:51 +0800104 pdev->htt_htc_pkt_freelist = NULL;
105 HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
106
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800107 while (pkt) {
108 next = pkt->u.next;
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530109 qdf_mem_free(pkt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800110 pkt = next;
111 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800112}
113
114#ifdef ATH_11AC_TXCOMPACT
Yun Parkeea1c9c2017-03-08 11:26:37 -0800115
/**
 * htt_htc_misc_pkt_list_trim() - bound the length of the misclist
 * @pdev: handle to the HTT instance
 * @level: maximum number of entries to retain on the misclist
 *
 * Walk the misclist and, once more than @level entries have been seen,
 * free every subsequent entry: the attached network buffer is DMA
 * unmapped and freed, then the wrapper itself is freed.  The list is
 * NULL-terminated at the last retained element.  The whole walk runs
 * with the HTT tx mutex held.
 */
void
htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level)
{
	struct htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/*
			 * Clear pkt so that prev stays pointing at the last
			 * retained element; its next pointer was just set to
			 * NULL to truncate the list.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
143
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800144void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
145{
146 struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
Houston Hoffman13f4be52017-03-14 16:17:03 -0700147 int misclist_trim_level = htc_get_tx_queue_depth(pdev->htc_pdev,
148 pkt->htc_pkt.Endpoint)
149 + HTT_HTC_PKT_MISCLIST_SIZE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800150
151 HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
152 if (pdev->htt_htc_pkt_misclist) {
153 u_pkt->u.next = pdev->htt_htc_pkt_misclist;
154 pdev->htt_htc_pkt_misclist = u_pkt;
155 } else {
156 pdev->htt_htc_pkt_misclist = u_pkt;
157 }
158 HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
Yun Parkeea1c9c2017-03-08 11:26:37 -0800159
Houston Hoffman13f4be52017-03-14 16:17:03 -0700160 /* only ce pipe size + tx_queue_depth could possibly be in use
161 * free older packets in the msiclist
162 */
163 htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800164}
165
/**
 * htt_htc_misc_pkt_pool_free() - release every packet on the misclist
 * @pdev: handle to the HTT instance
 *
 * Detach the whole misclist under the tx mutex, then walk it outside
 * the lock, unmapping and freeing each network buffer and wrapper.
 * Entries whose HTC magic cookie does not match are skipped (and
 * asserted on) rather than freed, since their contents cannot be
 * trusted.
 */
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	/* detach the list atomically; free outside the lock */
	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_misclist;
	pdev->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

	while (pkt) {
		next = pkt->u.next;
		/* corrupted entry: skip it instead of freeing blindly */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			QDF_ASSERT(0);
			pkt = next;
			continue;
		}

		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
}
192#endif
193
Houston Hoffman90e24d82016-04-27 17:15:44 -0700194
195/* AR6004 don't need HTT layer. */
196#ifdef AR6004_HW
197#define NO_HTT_NEEDED true
198#else
199#define NO_HTT_NEEDED false
200#endif
201
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530202#if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT)
203
204/**
205 * htt_htc_tx_htt2_service_start() - Start TX HTT2 service
206 *
207 * @pdev: pointer to htt device.
208 * @connect_req: pointer to service connection request information
209 * @connect_resp: pointer to service connection response information
210 *
211 *
212 * Return: None
213 */
static void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
	QDF_STATUS status;

	qdf_mem_zero(connect_req, sizeof(struct htc_service_connect_req));
	qdf_mem_zero(connect_resp, sizeof(struct htc_service_connect_resp));

	/* The same as HTT service but no RX. */
	connect_req->EpCallbacks.pContext = pdev;
	connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect_req->EpCallbacks.EpSendFull = htt_h2t_full;
	connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
	/* Should NOT support credit flow control. */
	connect_req->ConnectionFlags |=
		HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	/* Enable HTC schedule mechanism for TX HTT2 service. */
	connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE;

	connect_req->service_id = HTT_DATA2_MSG_SVC;

	status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp);

	/* on failure the endpoint is marked unused so callers fall back to
	 * the regular HTT data service
	 */
	if (status != QDF_STATUS_SUCCESS) {
		pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED;
		pdev->htc_tx_htt2_max_size = 0;
	} else {
		pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint;
		pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE;
	}

	qdf_print("TX HTT %s, ep %d size %d\n",
		  (status == QDF_STATUS_SUCCESS ? "ON" : "OFF"),
		  pdev->htc_tx_htt2_endpoint,
		  pdev->htc_tx_htt2_max_size);
}
252#else
253
/* Stub: the TX HTT2 service only exists when QCA_TX_HTT2_SUPPORT and
 * CONFIG_HL_SUPPORT are both defined; otherwise service start is a no-op.
 */
static inline void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
}
260#endif
261
262/**
263 * htt_htc_credit_flow_disable() - disable flow control for
264 * HTT data message service
265 *
266 * @pdev: pointer to htt device.
267 * @connect_req: pointer to service connection request information
268 *
269 * HTC Credit mechanism is disabled based on
270 * default_tx_comp_req as throughput will be lower
271 * if we disable htc credit mechanism with default_tx_comp_req
272 * set since txrx download packet will be limited by ota
273 * completion.
274 *
275 * Return: None
276 */
277static
278void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev,
Manikandan Mohan83c939c2017-04-13 20:23:07 -0700279 struct htc_service_connect_req *connect_req)
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530280{
281 if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) {
282 /*
283 * TODO:Conditional disabling will be removed once firmware
284 * with reduced tx completion is pushed into release builds.
285 */
286 if (!pdev->cfg.default_tx_comp_req)
287 connect_req->ConnectionFlags |=
288 HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
289 } else {
290 connect_req->ConnectionFlags |=
291 HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
292 }
293}
294
295#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
296
297/**
298 * htt_dump_bundle_stats() - dump wlan stats
299 * @pdev: handle to the HTT instance
300 *
301 * Return: None
302 */
void htt_dump_bundle_stats(htt_pdev_handle pdev)
{
	/* thin wrapper: delegate to the HTC layer's bundle-stats dump */
	htc_dump_bundle_stats(pdev->htc_pdev);
}
307
308/**
309 * htt_clear_bundle_stats() - clear wlan stats
310 * @pdev: handle to the HTT instance
311 *
312 * Return: None
313 */
void htt_clear_bundle_stats(htt_pdev_handle pdev)
{
	/* thin wrapper: delegate to the HTC layer's bundle-stats reset */
	htc_clear_bundle_stats(pdev->htc_pdev);
}
318#endif
319
Govind Singh97854162017-03-20 11:39:37 +0530320#if defined(QCA_WIFI_3_0_ADRASTEA)
321/**
322 * htt_htc_attach_all() - Connect to HTC service for HTT
323 * @pdev: pdev ptr
324 *
325 * Return: 0 for success or error code.
326 */
Nirav Shah23054cf2018-06-21 17:01:10 +0530327
328#if defined(QCN7605_SUPPORT) && defined(IPA_OFFLOAD)
329
330/* In case of QCN7605 with IPA offload only 2 CE
331 * are used for RFS
332 */
333static int
334htt_htc_attach_all(struct htt_pdev_t *pdev)
335{
336 if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
337 goto flush_endpoint;
338
339 if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
340 goto flush_endpoint;
341
342 return 0;
343
344flush_endpoint:
345 htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);
346
347 return -EIO;
348}
349
350#else
351
Govind Singh97854162017-03-20 11:39:37 +0530352static int
353htt_htc_attach_all(struct htt_pdev_t *pdev)
354{
355 if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
Dustin Brown95ff87c2018-04-04 16:10:00 -0700356 goto flush_endpoint;
357
Govind Singh97854162017-03-20 11:39:37 +0530358 if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
Dustin Brown95ff87c2018-04-04 16:10:00 -0700359 goto flush_endpoint;
360
Govind Singh97854162017-03-20 11:39:37 +0530361 if (htt_htc_attach(pdev, HTT_DATA3_MSG_SVC))
Dustin Brown95ff87c2018-04-04 16:10:00 -0700362 goto flush_endpoint;
363
Govind Singh97854162017-03-20 11:39:37 +0530364 return 0;
Dustin Brown95ff87c2018-04-04 16:10:00 -0700365
366flush_endpoint:
367 htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);
368
369 return -EIO;
Govind Singh97854162017-03-20 11:39:37 +0530370}
Nirav Shah23054cf2018-06-21 17:01:10 +0530371
372#endif
373
Govind Singh97854162017-03-20 11:39:37 +0530374#else
375/**
376 * htt_htc_attach_all() - Connect to HTC service for HTT
377 * @pdev: pdev ptr
378 *
379 * Return: 0 for success or error code.
380 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	/* non-Adrastea targets use a single HTT data service */
	return htt_htc_attach(pdev, HTT_DATA_MSG_SVC);
}
386#endif
387
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800388/**
389 * htt_pdev_alloc() - allocate HTT pdev
390 * @txrx_pdev: txrx pdev
391 * @ctrl_pdev: cfg pdev
392 * @htc_pdev: HTC pdev
393 * @osdev: os device
394 *
395 * Return: HTT pdev handle
396 */
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
	       struct cdp_cfg *ctrl_pdev,
	       HTC_HANDLE htc_pdev, qdf_device_t osdev)
{
	struct htt_pdev_t *pdev;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	/* without a HIF context the fastpath callback below cannot be
	 * registered, so fail early
	 */
	if (!osc)
		goto fail1;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail1;

	pdev->osdev = osdev;
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->txrx_pdev = txrx_pdev;
	pdev->htc_pdev = htc_pdev;

	pdev->htt_htc_pkt_freelist = NULL;
#ifdef ATH_11AC_TXCOMPACT
	pdev->htt_htc_pkt_misclist = NULL;
#endif

	/* for efficiency, store a local copy of the is_high_latency flag */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);
	/*
	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
	 * enabled or not.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(pdev->ctrl_pdev);

	/* tx completions are requested when PTP rx optimization or packet
	 * logging needs them
	 */
	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req =
			!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);

	pdev->cfg.is_full_reorder_offload =
			ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "full_reorder_offloaded %d",
		  (int)pdev->cfg.is_full_reorder_offload);

	pdev->cfg.ce_classify_enabled =
		ol_cfg_is_ce_classify_enabled(ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "ce_classify %d",
		  pdev->cfg.ce_classify_enabled);

	/* HL targets track tx credits on the host side */
	if (pdev->cfg.is_high_latency) {
		qdf_atomic_init(&pdev->htt_tx_credit.target_delta);
		qdf_atomic_init(&pdev->htt_tx_credit.bus_delta);
		qdf_atomic_add(HTT_MAX_BUS_CREDIT,
			       &pdev->htt_tx_credit.bus_delta);
	}

	pdev->targetdef = htc_get_targetdef(htc_pdev);
#if defined(HELIUMPLUS)
	HTT_SET_WIFI_IP(pdev, 2, 0);
#endif /* defined(HELIUMPLUS) */

	if (NO_HTT_NEEDED)
		goto success;
	/*
	 * Connect to HTC service.
	 * This has to be done before calling htt_rx_attach,
	 * since htt_rx_attach involves sending a rx ring configure
	 * message to the target.
	 */
	HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
	HTT_TX_MUTEX_INIT(&pdev->credit_mutex);
	if (htt_htc_attach_all(pdev))
		goto htt_htc_attach_fail;
	/* fastpath registration failure is non-fatal: the slow path works */
	if (hif_ce_fastpath_cb_register(osc, htt_t2h_msg_handler_fast, pdev))
		qdf_print("failed to register fastpath callback\n");

success:
	return pdev;

htt_htc_attach_fail:
	/* unwind the mutexes initialized above before freeing the pdev */
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
	qdf_mem_free(pdev);

fail1:
	return NULL;

}
490
491/**
492 * htt_attach() - Allocate and setup HTT TX/RX descriptors
493 * @pdev: pdev ptr
494 * @desc_pool_size: size of tx descriptors
495 *
496 * Return: 0 for success or error code.
497 */
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
{
	int i;
	int ret = 0;

	pdev->is_ipa_uc_enabled = false;
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		pdev->is_ipa_uc_enabled = true;

	pdev->new_htt_format_enabled = false;
	if (ol_cfg_is_htt_new_format_enabled(pdev->ctrl_pdev))
		pdev->new_htt_format_enabled = true;

	htc_enable_hdr_length_check(pdev->htc_pdev,
				    pdev->new_htt_format_enabled);

	ret = htt_tx_attach(pdev, desc_pool_size);
	if (ret)
		goto fail1;

	ret = htt_rx_attach(pdev);
	if (ret)
		goto fail2;

	/* pre-allocate some HTC_PACKET objects;
	 * partial pre-allocation is acceptable, so a failed malloc just
	 * stops the loop
	 */
	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
		struct htt_htc_pkt_union *pkt;

		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt)
			break;
		htt_htc_pkt_free(pdev, &pkt->u.pkt);
	}

	if (pdev->cfg.is_high_latency) {
		/*
		 * HL - download the whole frame.
		 * Specify a download length greater than the max MSDU size,
		 * so the downloads will be limited by the actual frame sizes.
		 */
		pdev->download_len = 5000;

		if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev) &&
		    !pdev->cfg.request_tx_comp)
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_free;
		else
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_retain;

		/*
		 * CHECK THIS LATER: does the HL HTT version of
		 * htt_rx_mpdu_desc_list_next
		 * (which is not currently implemented) present the
		 * adf_nbuf_data(rx_ind_msg)
		 * as the abstract rx descriptor?
		 * If not, the rx_fw_desc_offset initialization
		 * here will have to be adjusted accordingly.
		 * NOTE: for HL, because fw rx desc is in ind msg,
		 * not in rx desc, so the
		 * offset should be a negative value
		 */
		pdev->rx_fw_desc_offset =
			HTT_ENDIAN_BYTE_IDX_SWAP(
					HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
					- HTT_RX_IND_HL_BYTES);

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_hl;

		/* initialize the txrx credit count */
		ol_tx_target_credit_update(
				pdev->txrx_pdev, ol_cfg_target_tx_credit(
					pdev->ctrl_pdev));
	} else {
		enum wlan_frm_fmt frm_type;

		/*
		 * LL - download just the initial portion of the frame.
		 * Download enough to cover the encapsulation headers checked
		 * by the target's tx classification descriptor engine.
		 *
		 * For LL, the FW rx desc directly referenced at its location
		 * inside the rx indication message.
		 */

		/* account for the 802.3 or 802.11 header */
		frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);

		if (frm_type == wlan_frm_fmt_native_wifi) {
			pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
		} else if (frm_type == wlan_frm_fmt_802_3) {
			pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
		} else {
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
				  "Unexpected frame type spec: %d", frm_type);
			HTT_ASSERT0(0);
		}

		/*
		 * Account for the optional L2 / ethernet header fields:
		 * 802.1Q, LLC/SNAP
		 */
		pdev->download_len +=
			HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;

		/*
		 * Account for the portion of the L3 (IP) payload that the
		 * target needs for its tx classification.
		 */
		pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);

		/*
		 * Account for the HTT tx descriptor, including the
		 * HTC header + alignment padding.
		 */
		pdev->download_len += sizeof(struct htt_host_tx_desc_t);

		/*
		 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
		 * to apply for all requeued tx frames. Thus,
		 * pdev->download_len has to be the largest download length of
		 * any tx frame that will be downloaded.
		 * This maximum download length is for management tx frames,
		 * which have an 802.11 header.
		 */
#ifdef ATH_11AC_TXCOMPACT
		pdev->download_len = sizeof(struct htt_host_tx_desc_t)
			+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
			+ HTT_TX_HDR_SIZE_802_1Q
			+ HTT_TX_HDR_SIZE_LLC_SNAP
			+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
#endif
		pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

		/*
		 * For LL, the FW rx desc is alongside the HW rx desc fields in
		 * the htt_host_rx_desc_base struct.
		 */
		pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_ll;
	}

	return 0;

fail2:
	htt_tx_detach(pdev);

fail1:
	return ret;
}
652
/**
 * htt_attach_target() - run the HTT host/target configuration handshake
 * @pdev: handle to the HTT instance
 *
 * Sends, in order: the HTT version request, the fragment-descriptor
 * bank config (Helium targets only), the RFS rx ring config, the rx
 * ring config, and finally the IPA uC resource config (when compiled
 * in).  Stops and returns at the first message that fails to send.
 *
 * Return: QDF_STATUS_SUCCESS on success, or the failing send status.
 */
QDF_STATUS htt_attach_target(htt_pdev_handle pdev)
{
	QDF_STATUS status;

	status = htt_h2t_ver_req_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ver_req msg",
			  __func__, __LINE__);
		return status;
	}
#if defined(HELIUMPLUS)
	/*
	 * Send the frag_desc info to target.
	 */
	status = htt_h2t_frag_desc_bank_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_frag_desc_bank_cfg msg",
			  __func__, __LINE__);
		return status;
	}
#endif /* defined(HELIUMPLUS) */


	/*
	 * If applicable, send the rx ring config message to the target.
	 * The host could wait for the HTT version number confirmation message
	 * from the target before sending any further HTT messages, but it's
	 * reasonable to assume that the host and target HTT version numbers
	 * match, and proceed immediately with the remaining configuration
	 * handshaking.
	 */

	status = htt_h2t_rx_ring_rfs_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_rfs_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	status = htt_h2t_rx_ring_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	/* no-op unless IPA_OFFLOAD is compiled in */
	status = HTT_IPA_CONFIG(pdev, status);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ipa_uc_rsc_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	return status;
}
713
/**
 * htt_detach() - tear down the HTT instance's rx/tx state and pools
 * @pdev: handle to the HTT instance
 *
 * Inverse of htt_attach(): releases rx then tx descriptor state, frees
 * the HTC packet pools, and destroys the mutexes created at pdev alloc
 * time.  The pdev structure itself is freed later by htt_pdev_free().
 */
void htt_detach(htt_pdev_handle pdev)
{
	htt_rx_detach(pdev);
	htt_tx_detach(pdev);
	htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
	htt_htc_misc_pkt_pool_free(pdev);
#endif
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}
726
727/**
728 * htt_pdev_free() - Free HTT pdev
729 * @pdev: htt pdev
730 *
731 * Return: none
732 */
void htt_pdev_free(htt_pdev_handle pdev)
{
	/* callers must run htt_detach() first; this only frees the struct */
	qdf_mem_free(pdev);
}
737
/* Intentional no-op: no target-side teardown handshake is required,
 * but the symbol is kept for API symmetry with htt_attach_target().
 */
void htt_detach_target(htt_pdev_handle pdev)
{
}
741
/**
 * htt_update_endpoint() - record the HTC tx endpoint for an HTT service
 * @pdev: handle to the HTT instance
 * @service_id: HTC service that was just connected
 * @ep: endpoint id assigned by HTC for that service
 *
 * Queries HIF for the upload (UL) pipe mapped to @service_id; a valid
 * UL pipe marks the service as a tx service, in which case the endpoint
 * is stored on the pdev and saved with HIF.  If the HIF context is
 * missing or the mapping fails, the service is conservatively treated
 * as non-tx.
 *
 * Return: 1 when the endpoint was recorded as the tx endpoint, else 0.
 */
static inline
int htt_update_endpoint(struct htt_pdev_t *pdev,
			uint16_t service_id, HTC_ENDPOINT_ID ep)
{
	struct hif_opaque_softc *hif_ctx;
	uint8_t ul = 0xff, dl = 0xff;
	int ul_polled, dl_polled;
	int tx_service = 0;
	int rc = 0;

	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	if (qdf_unlikely(!hif_ctx)) {
		QDF_ASSERT(hif_ctx);
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: assuming non-tx service.",
			  __func__, __LINE__);
	} else {
		/* 0xff = "no pipe"; overwritten by a successful mapping */
		ul = dl = 0xff;
		if (QDF_STATUS_SUCCESS !=
		    hif_map_service_to_pipe(hif_ctx, service_id,
					    &ul, &dl,
					    &ul_polled, &dl_polled))
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
				  "%s:%d: assuming non-tx srv.",
				  __func__, __LINE__);
		else
			tx_service = (ul != 0xff);
	}
	if (tx_service) {
		/* currently we have only one OUT htt tx service */
		QDF_BUG(service_id == HTT_DATA_MSG_SVC);

		pdev->htc_tx_endpoint = ep;
		hif_save_htc_htt_config_endpoint(hif_ctx, ep);
		rc = 1;
	}
	return rc;
}
780
/**
 * htt_htc_attach() - connect the HTT instance to an HTC service
 * @pdev: handle to the HTT instance being attached
 * @service_id: HTC service to connect (e.g. HTT_DATA_MSG_SVC)
 *
 * Builds an HTC service connect request wired to the HTT endpoint
 * callbacks (tx complete, t2h receive, tx-queue resume, padding credit
 * update, send-full), disables HTC credit flow control for the HTT data
 * service, connects, records the resulting endpoint via
 * htt_update_endpoint(), and starts the optional TX HTT2 service.
 *
 * Return: 0 on success; -EIO if the connect failed while fw is down;
 *         a negative os error for OOM / self-recovery cases
 */
int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	/* HTT pdev is handed back as the context in every endpoint callback */
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler;
	connect.EpCallbacks.ep_padding_credit_update =
					htt_tx_padding_credit_update_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	htt_htc_credit_flow_disable(pdev, &connect);

	/* connect to control service */
	connect.service_id = service_id;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		/* fw already dead: report I/O error without asserting */
		if (cds_is_fw_down())
			return -EIO;

		if (status == QDF_STATUS_E_NOMEM ||
		    cds_is_self_recovery_enabled())
			return qdf_status_to_os_return(status);

		/*
		 * NOTE(review): in builds where QDF_BUG() does not halt,
		 * execution falls through here and the function still
		 * returns 0 with an unconnected endpoint -- confirm this
		 * fallthrough is intentional.
		 */
		QDF_BUG(0);
	}

	htt_update_endpoint(pdev, service_id, response.Endpoint);

	/* Start TX HTT2 service if the target support it. */
	htt_htc_tx_htt2_service_start(pdev, &connect, &response);

	return 0;               /* success */
}
838
Poddar, Siddarthb9047592017-10-05 15:48:28 +0530839void htt_log_rx_ring_info(htt_pdev_handle pdev)
840{
841 if (!pdev) {
842 QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
843 "%s: htt pdev is NULL", __func__);
844 return;
845 }
846 QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG,
847 "%s: Data Stall Detected with reason 4 (=FW_RX_REFILL_FAILED)."
848 "src htt rx ring: space for %d elements, filled with %d buffers, buffers in the ring %d, refill debt %d",
849 __func__, pdev->rx_ring.size, pdev->rx_ring.fill_level,
850 pdev->rx_ring.fill_cnt,
851 qdf_atomic_read(&pdev->rx_ring.refill_debt));
852}
853
#if HTT_DEBUG_LEVEL > 5
/**
 * htt_display() - dump HTT tx descriptor pool and rx ring state
 * @pdev: handle to the HTT instance to display
 * @indent: base number of leading spaces; nested detail lines are
 *          indented a further 4 or 8 columns
 *
 * Debug-only helper (compiled only when HTT_DEBUG_LEVEL > 5) that prints
 * the tx descriptor pool occupancy, the rx ring geometry and buffer
 * addresses, and the SW enqueue/dequeue indices via qdf_print().
 */
void htt_display(htt_pdev_handle pdev, int indent)
{
	qdf_print("%*s%s:\n", indent, " ", "HTT");
	qdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
		  indent + 4, " ",
		  pdev->tx_descs.pool_elems,
		  pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
	qdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
		  indent + 4, " ",
		  pdev->rx_ring.size, pdev->rx_ring.fill_level);
	qdf_print("%*sat %pK (%llx paddr)\n", indent + 8, " ",
		  pdev->rx_ring.buf.paddrs_ring,
		  (unsigned long long)pdev->rx_ring.base_paddr);
	qdf_print("%*snetbuf ring @ %pK\n", indent + 8, " ",
		  pdev->rx_ring.buf.netbufs_ring);
	qdf_print("%*sFW_IDX shadow register: vaddr = %pK, paddr = %llx\n",
		  indent + 8, " ",
		  pdev->rx_ring.alloc_idx.vaddr,
		  (unsigned long long)pdev->rx_ring.alloc_idx.paddr);
	qdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
		  indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
		  pdev->rx_ring.sw_rd_idx.msdu_desc,
		  pdev->rx_ring.sw_rd_idx.msdu_payld);
}
#endif
880
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_uc_attach() - Allocate IPA micro-controller data path resources
 * @pdev: handle to the HTT instance
 *
 * Attaches the TX resources first (buffer size / count / partition base
 * taken from the control pdev config), then the RX resources sized to
 * the rx ring fill level rounded up to a power of two.  If the RX attach
 * fails, the already-acquired TX resources are released before returning.
 *
 * Return: 0 on success, non-zero error code on failure
 */
int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
	int error;

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		  __func__);

	/* TX resource attach */
	error = htt_tx_ipa_uc_attach(
		pdev,
		ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC TX attach fail code %d", error);
		HTT_ASSERT0(0);
		return error;
	}

	/* RX resource attach */
	error = htt_rx_ipa_uc_attach(
		pdev, qdf_get_pwr2(pdev->rx_ring.fill_level));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC RX attach fail code %d", error);
		/* unwind the TX attach so no resources leak on this path */
		htt_tx_ipa_uc_detach(pdev);
		HTT_ASSERT0(0);
		return error;
	}

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		  __func__);
	return 0;               /* success */
}
924
/**
 * htt_ipa_uc_detach() - Remove IPA micro-controller data path resources
 * @pdev: handle to the HTT instance
 *
 * Releases the TX resources first, then the RX resources, mirroring the
 * acquisition order of htt_ipa_uc_attach().
 *
 * Return: None
 */
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		  __func__);

	/* TX IPA micro controller detach */
	htt_tx_ipa_uc_detach(pdev);

	/* RX IPA micro controller detach */
	htt_rx_ipa_uc_detach(pdev);

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		  __func__);
}
945
/**
 * htt_ipa_uc_get_resource() - hand the UC data path resources to a client
 * @pdev: handle to the HTT instance
 * @ce_sr: output, copy-engine source ring (filled by HTC, not by HTT)
 * @tx_comp_ring: output, tx completion ring shared memory
 * @rx_rdy_ring: output, rx indication ring shared memory
 * @rx2_rdy_ring: output, second rx indication ring shared memory
 * @rx_proc_done_idx: output, rx IPA processed-index shared memory
 * @rx2_proc_done_idx: output, second rx IPA processed-index shared memory
 * @ce_sr_ring_size: output, copy-engine source ring size
 * @ce_reg_paddr: output, copy-engine register physical address
 * @tx_num_alloc_buffer: output, number of allocated tx buffers
 *
 * Copies the pointers to the resources allocated by htt_ipa_uc_attach()
 * into the caller-supplied locations, then fetches the copy-engine and
 * bus resources from HTC.
 *
 * Return: 0 (always succeeds)
 */
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
			qdf_shared_mem_t **ce_sr,
			qdf_shared_mem_t **tx_comp_ring,
			qdf_shared_mem_t **rx_rdy_ring,
			qdf_shared_mem_t **rx2_rdy_ring,
			qdf_shared_mem_t **rx_proc_done_idx,
			qdf_shared_mem_t **rx2_proc_done_idx,
			uint32_t *ce_sr_ring_size,
			qdf_dma_addr_t *ce_reg_paddr,
			uint32_t *tx_num_alloc_buffer)
{
	/* Release allocated resource to client */
	*tx_comp_ring = pdev->ipa_uc_tx_rsc.tx_comp_ring;
	*rx_rdy_ring = pdev->ipa_uc_rx_rsc.rx_ind_ring;
	*rx2_rdy_ring = pdev->ipa_uc_rx_rsc.rx2_ind_ring;
	*rx_proc_done_idx = pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx;
	*rx2_proc_done_idx = pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx;
	*tx_num_alloc_buffer = (uint32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* Get copy engine, bus resource */
	htc_ipa_get_ce_resource(pdev->htc_pdev, ce_sr,
				ce_sr_ring_size, ce_reg_paddr);

	return 0;
}
972
/**
 * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell addresses
 * @pdev: handle to the HTT instance
 * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
 * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
 *
 * Records the doorbell physical addresses in the pdev's UC tx/rx
 * resource state for later use by the micro-controller data path.
 *
 * Return: 0 (always succeeds)
 */
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
			      qdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
			      qdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
{
	pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
	pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
	return 0;
}
#endif /* IPA_OFFLOAD */
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530991
992/**
993 * htt_mark_first_wakeup_packet() - set flag to indicate that
994 * fw is compatible for marking first packet after wow wakeup
995 * @pdev: pointer to htt pdev
996 * @value: 1 for enabled/ 0 for disabled
997 *
998 * Return: None
999 */
1000void htt_mark_first_wakeup_packet(htt_pdev_handle pdev,
1001 uint8_t value)
1002{
1003 if (!pdev) {
Poddar, Siddarth16264472017-03-14 19:39:43 +05301004 QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1005 "%s: htt pdev is NULL", __func__);
Himanshu Agarwal19141bb2016-07-20 20:15:48 +05301006 return;
1007 }
1008
1009 pdev->cfg.is_first_wakeup_packet = value;
1010}
1011