blob: 796a08b9c7ddc98c3a4f6a824a3120944605b43d [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
c_cgodavfda96ad2017-09-07 16:16:00 +05302 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019/**
20 * DOC: hif_napi.c
21 *
22 * HIF NAPI interface implementation
23 */
24
Dustin Brown49a8f6e2017-08-17 15:47:48 -070025#include <linux/string.h> /* memset */
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -070026
27/* Linux headers */
28#include <linux/cpumask.h>
29#include <linux/cpufreq.h>
30#include <linux/cpu.h>
31#include <linux/topology.h>
32#include <linux/interrupt.h>
Mohit Khanna012bfe32017-01-19 21:15:35 -080033#include <linux/irq.h>
Mohit Khanna4a76dde2017-03-10 11:48:12 -080034#ifdef CONFIG_SCHED_CORE_CTL
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -070035#include <linux/sched/core_ctl.h>
Mohit Khanna4a76dde2017-03-10 11:48:12 -080036#endif
Houston Hoffmanb3497c02017-04-22 18:27:00 -070037#include <pld_common.h>
Manjunathappa Prakash21196d22016-09-12 13:39:41 -070038#include <linux/pm.h>
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -070039
40/* Driver headers */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080041#include <hif_napi.h>
42#include <hif_debug.h>
43#include <hif_io32.h>
44#include <ce_api.h>
Dhanashri Atre8d978172015-10-30 15:12:03 -070045#include <ce_internal.h>
Houston Hoffmanb3497c02017-04-22 18:27:00 -070046#include <hif_irq_affinity.h>
Dustin Brown5d0d1042017-10-24 16:10:23 -070047#include "qdf_cpuhp.h"
Pratik Gandhidc82a772018-01-30 18:57:05 +053048#include "qdf_module.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080049
50enum napi_decision_vector {
51 HIF_NAPI_NOEVENT = 0,
52 HIF_NAPI_INITED = 1,
53 HIF_NAPI_CONF_UP = 2
54};
55#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
56
Manjunathappa Prakash56023f52018-03-28 20:05:56 -070057#ifdef RECEIVE_OFFLOAD
/**
 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: Rx_thread NAPI
 * @budget: NAPI BUDGET
 *
 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
 */
static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
{
	/* This NAPI exists only so rx_thread owns a napi_struct; it is
	 * never scheduled, so reaching this poll callback is a bug.
	 */
	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
	QDF_ASSERT(0);
	return 0;
}
71
/**
 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Creates a dummy netdev/NAPI pair that serves purely as a container
 * for the rx_thread; its poll routine asserts if ever invoked.
 *
 * Return: None
 */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
	init_dummy_netdev(&napii->rx_thread_netdev);
	/* 64 is the conventional NAPI weight; the poll routine is a stub
	 * and is never actually scheduled.
	 */
	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
		       hif_rxthread_napi_poll, 64);
	napi_enable(&napii->rx_thread_napi);
}
Karthik Kantamnenic1b9dcf2018-07-27 13:30:04 +053085
/**
 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Unregisters the container NAPI created by hif_init_rx_thread_napi().
 *
 * Return: None
 */
static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
	netif_napi_del(&napii->rx_thread_napi);
}
Manjunathappa Prakash56023f52018-03-28 20:05:56 -070096#else /* RECEIVE_OFFLOAD */
/* RECEIVE_OFFLOAD disabled: no dummy rx_thread NAPI to create */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
}
Karthik Kantamnenic1b9dcf2018-07-27 13:30:04 +0530100
/* RECEIVE_OFFLOAD disabled: nothing to tear down */
static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
}
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700104#endif
105
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800106/**
107 * hif_napi_create() - creates the NAPI structures for a given CE
108 * @hif : pointer to hif context
109 * @pipe_id: the CE id on which the instance will be created
110 * @poll : poll function to be used for this NAPI instance
111 * @budget : budget to be registered with the NAPI instance
112 * @scale : scale factor on the weight (to scaler budget to 1000)
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700113 * @flags : feature flags
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800114 *
115 * Description:
116 * Creates NAPI instances. This function is called
117 * unconditionally during initialization. It creates
118 * napi structures through the proper HTC/HIF calls.
119 * The structures are disabled on creation.
120 * Note that for each NAPI instance a separate dummy netdev is used
121 *
122 * Return:
123 * < 0: error
124 * = 0: <should never happen>
125 * > 0: id of the created object (for multi-NAPI, number of objects created)
126 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530127int hif_napi_create(struct hif_opaque_softc *hif_ctx,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800128 int (*poll)(struct napi_struct *, int),
129 int budget,
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700130 int scale,
131 uint8_t flags)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800132{
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700133 int i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800134 struct qca_napi_data *napid;
135 struct qca_napi_info *napii;
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700136 struct CE_state *ce_state;
Komal Seelam644263d2016-02-22 20:45:49 +0530137 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700138 int rc = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800139
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700140 NAPI_DEBUG("-->(budget=%d, scale=%d)",
141 budget, scale);
Houston Hoffman56936832016-03-16 12:16:24 -0700142 NAPI_DEBUG("hif->napi_data.state = 0x%08x",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800143 hif->napi_data.state);
Houston Hoffman56936832016-03-16 12:16:24 -0700144 NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800145 hif->napi_data.ce_map);
146
147 napid = &(hif->napi_data);
148 if (0 == (napid->state & HIF_NAPI_INITED)) {
149 memset(napid, 0, sizeof(struct qca_napi_data));
Houston Hoffman3c841052016-12-12 12:52:45 -0800150 qdf_spinlock_create(&(napid->lock));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800151
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800152 napid->state |= HIF_NAPI_INITED;
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700153 napid->flags = flags;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800154
Houston Hoffmanfec8ed12016-11-15 10:42:27 -0800155 rc = hif_napi_cpu_init(hif_ctx);
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700156 if (rc != 0 && rc != -EALREADY) {
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700157 HIF_ERROR("NAPI_initialization failed,. %d", rc);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800158 rc = napid->ce_map;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700159 goto hnc_err;
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700160 } else
161 rc = 0;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700162
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800163 HIF_DBG("%s: NAPI structures initialized, rc=%d",
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700164 __func__, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800165 }
Houston Hoffmand6f946c2016-04-06 15:16:00 -0700166 for (i = 0; i < hif->ce_count; i++) {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700167 ce_state = hif->ce_id_to_state[i];
Venkateswara Swamy Bandaru16334362016-08-23 15:38:10 +0530168 NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700169 i, ce_state->htt_rx_data,
170 ce_state->htt_tx_data);
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700171 if (ce_srng_based(hif))
172 continue;
173
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700174 if (!ce_state->htt_rx_data)
175 continue;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800176
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700177 /* Now this is a CE where we need NAPI on */
178 NAPI_DEBUG("Creating NAPI on pipe %d", i);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800179 napii = qdf_mem_malloc(sizeof(*napii));
180 napid->napis[i] = napii;
181 if (!napii) {
182 NAPI_DEBUG("NAPI alloc failure %d", i);
183 rc = -ENOMEM;
Sathish Kumar2318e0f2018-06-15 15:49:46 +0530184 goto napii_free;
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800185 }
186 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800187
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800188 for (i = 0; i < hif->ce_count; i++) {
189 napii = napid->napis[i];
190 if (!napii)
191 continue;
192
193 NAPI_DEBUG("initializing NAPI for pipe %d", i);
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700194 memset(napii, 0, sizeof(struct qca_napi_info));
195 napii->scale = scale;
196 napii->id = NAPI_PIPE2ID(i);
Venkateswara Swamy Bandaru16334362016-08-23 15:38:10 +0530197 napii->hif_ctx = hif_ctx;
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700198 napii->irq = pld_get_irq(hif->qdf_dev->dev, i);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700199
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700200 if (napii->irq < 0)
201 HIF_WARN("%s: bad IRQ value for CE %d: %d",
202 __func__, i, napii->irq);
203
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700204 init_dummy_netdev(&(napii->netdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800205
Jeff Johnsonb9450212017-09-18 10:12:38 -0700206 NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700207 &(napii->napi), &(napii->netdev), poll, budget);
208 netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800209
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700210 NAPI_DEBUG("after napi_add");
Jeff Johnsonb9450212017-09-18 10:12:38 -0700211 NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700212 &(napii->napi), &(napii->netdev));
Jeff Johnsonb9450212017-09-18 10:12:38 -0700213 NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700214 napii->napi.dev_list.prev,
215 napii->napi.dev_list.next);
Jeff Johnsonb9450212017-09-18 10:12:38 -0700216 NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700217 napii->netdev.napi_list.prev,
218 napii->netdev.napi_list.next);
219
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700220 hif_init_rx_thread_napi(napii);
Dhanashri Atre991ee4d2017-05-03 19:03:10 -0700221 napii->lro_ctx = qdf_lro_init();
Jeff Johnsonb9450212017-09-18 10:12:38 -0700222 NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
Dhanashri Atre991ee4d2017-05-03 19:03:10 -0700223 i, napii->id, napii->lro_ctx);
224
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700225 /* It is OK to change the state variable below without
226 * protection as there should be no-one around yet
227 */
228 napid->ce_map |= (0x01 << i);
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800229 HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700230 napii->id, i);
231 }
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700232
233 /* no ces registered with the napi */
234 if (!ce_srng_based(hif) && napid->ce_map == 0) {
235 HIF_WARN("%s: no napis created for copy engines", __func__);
Sathish Kumar2318e0f2018-06-15 15:49:46 +0530236 rc = -EFAULT;
237 goto napii_free;
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700238 }
239
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800240 NAPI_DEBUG("napi map = %x", napid->ce_map);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700241 NAPI_DEBUG("NAPI ids created for all applicable pipes");
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800242 return napid->ce_map;
243
Sathish Kumar2318e0f2018-06-15 15:49:46 +0530244napii_free:
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800245 for (i = 0; i < hif->ce_count; i++) {
246 napii = napid->napis[i];
247 napid->napis[i] = NULL;
248 if (napii)
249 qdf_mem_free(napii);
250 }
251
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700252hnc_err:
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700253 NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800254 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800255}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530256qdf_export_symbol(hif_napi_create);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800257
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700258#ifdef RECEIVE_OFFLOAD
259void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
260 void (offld_flush_handler)(void *))
261{
262 int i;
263 struct CE_state *ce_state;
264 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
265 struct qca_napi_data *napid;
266 struct qca_napi_info *napii;
267
268 if (!scn) {
269 HIF_ERROR("%s: hif_state NULL!", __func__);
270 QDF_ASSERT(0);
271 return;
272 }
273
274 napid = hif_napi_get_all(hif_hdl);
275 for (i = 0; i < scn->ce_count; i++) {
276 ce_state = scn->ce_id_to_state[i];
277 if (ce_state && (ce_state->htt_rx_data)) {
278 napii = napid->napis[i];
279 napii->offld_flush_cb = offld_flush_handler;
280 HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %p\n",
281 i, napii->id, napii->offld_flush_cb);
282 }
283 }
284}
285
286void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
287{
288 int i;
289 struct CE_state *ce_state;
290 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
291 struct qca_napi_data *napid;
292 struct qca_napi_info *napii;
293
294 if (!scn) {
295 HIF_ERROR("%s: hif_state NULL!", __func__);
296 QDF_ASSERT(0);
297 return;
298 }
299
300 napid = hif_napi_get_all(hif_hdl);
301 for (i = 0; i < scn->ce_count; i++) {
302 ce_state = scn->ce_id_to_state[i];
303 if (ce_state && (ce_state->htt_rx_data)) {
304 napii = napid->napis[i];
305 HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n",
306 i, napii->id, napii->offld_flush_cb);
307 /* Not required */
308 napii->offld_flush_cb = NULL;
309 }
310 }
311}
312#endif /* RECEIVE_OFFLOAD */
313
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800314/**
315 *
316 * hif_napi_destroy() - destroys the NAPI structures for a given instance
317 * @hif : pointer to hif context
318 * @ce_id : the CE id whose napi instance will be destroyed
319 * @force : if set, will destroy even if entry is active (de-activates)
320 *
321 * Description:
322 * Destroy a given NAPI instance. This function is called
323 * unconditionally during cleanup.
324 * Refuses to destroy an entry of it is still enabled (unless force=1)
325 * Marks the whole napi_data invalid if all instances are destroyed.
326 *
327 * Return:
328 * -EINVAL: specific entry has not been created
329 * -EPERM : specific entry is still active
330 * 0 < : error
331 * 0 = : success
332 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530333int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800334 uint8_t id,
335 int force)
336{
337 uint8_t ce = NAPI_ID2PIPE(id);
338 int rc = 0;
Komal Seelam644263d2016-02-22 20:45:49 +0530339 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800340
Houston Hoffman56936832016-03-16 12:16:24 -0700341 NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800342
343 if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -0700344 HIF_ERROR("%s: NAPI not initialized or entry %d not created",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800345 __func__, id);
346 rc = -EINVAL;
347 } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -0700348 HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800349 __func__, id, ce);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800350 if (hif->napi_data.napis[ce])
351 HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
352 __func__, id, ce);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800353 rc = -EINVAL;
354 } else {
355 struct qca_napi_data *napid;
356 struct qca_napi_info *napii;
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800357
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800358 napid = &(hif->napi_data);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800359 napii = napid->napis[ce];
360 if (!napii) {
361 if (napid->ce_map & (0x01 << ce))
362 HIF_ERROR("%s: napii & ce_map out of sync(ce %d)",
363 __func__, ce);
364 return -EINVAL;
365 }
366
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800367
368 if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
369 if (force) {
370 napi_disable(&(napii->napi));
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800371 HIF_DBG("%s: NAPI entry %d force disabled",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800372 __func__, id);
Houston Hoffman56936832016-03-16 12:16:24 -0700373 NAPI_DEBUG("NAPI %d force disabled", id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800374 } else {
Houston Hoffmanc50572b2016-06-08 19:49:46 -0700375 HIF_ERROR("%s: Cannot destroy active NAPI %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800376 __func__, id);
377 rc = -EPERM;
378 }
379 }
380 if (0 == rc) {
Houston Hoffman56936832016-03-16 12:16:24 -0700381 NAPI_DEBUG("before napi_del");
Jeff Johnsonb9450212017-09-18 10:12:38 -0700382 NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800383 napii->napi.dev_list.prev,
384 napii->napi.dev_list.next);
Jeff Johnsonb9450212017-09-18 10:12:38 -0700385 NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700386 napii->netdev.napi_list.prev,
387 napii->netdev.napi_list.next);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800388
Dhanashri Atre991ee4d2017-05-03 19:03:10 -0700389 qdf_lro_deinit(napii->lro_ctx);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800390 netif_napi_del(&(napii->napi));
Karthik Kantamnenic1b9dcf2018-07-27 13:30:04 +0530391 hif_deinit_rx_thread_napi(napii);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800392
393 napid->ce_map &= ~(0x01 << ce);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800394 napid->napis[ce] = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800395 napii->scale = 0;
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800396 qdf_mem_free(napii);
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800397 HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800398
399 /* if there are no active instances and
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800400 * if they are all destroyed,
401 * set the whole structure to uninitialized state
402 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800403 if (napid->ce_map == 0) {
Houston Hoffmanfec8ed12016-11-15 10:42:27 -0800404 rc = hif_napi_cpu_deinit(hif_ctx);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700405 /* caller is tolerant to receiving !=0 rc */
406
Houston Hoffman3c841052016-12-12 12:52:45 -0800407 qdf_spinlock_destroy(&(napid->lock));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800408 memset(napid,
409 0, sizeof(struct qca_napi_data));
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800410 HIF_DBG("%s: no NAPI instances. Zapped.",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800411 __func__);
412 }
413 }
414 }
415
416 return rc;
417}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530418qdf_export_symbol(hif_napi_destroy);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800419
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700420#ifdef FEATURE_LRO
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700421void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
422{
423 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
424 struct qca_napi_data *napid;
425 struct qca_napi_info *napii;
426
427 napid = &(scn->napi_data);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800428 napii = napid->napis[NAPI_ID2PIPE(napi_id)];
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700429
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800430 if (napii)
431 return napii->lro_ctx;
432 return 0;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700433}
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700434#endif
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700435
436/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800437 *
438 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
439 * @hif: pointer to hif context
440 *
441 * Description:
442 * Returns the address of the whole structure
443 *
444 * Return:
445 * <addr>: address of the whole HIF NAPI structure
446 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530447inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800448{
Komal Seelam644263d2016-02-22 20:45:49 +0530449 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
450
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800451 return &(hif->napi_data);
452}
453
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700454struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
Manjunathappa Prakashc3aeffb2018-04-15 00:42:41 -0700455{
456 int id = NAPI_ID2PIPE(napi_id);
457
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700458 return napid->napis[id];
Manjunathappa Prakashc3aeffb2018-04-15 00:42:41 -0700459}
460
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800461/**
462 *
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700463 * hif_napi_event() - reacts to events that impact NAPI
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800464 * @hif : pointer to hif context
465 * @evnt: event that has been detected
466 * @data: more data regarding the event
467 *
468 * Description:
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700469 * This function handles two types of events:
470 * 1- Events that change the state of NAPI (enabled/disabled):
471 * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
472 * The state is retrievable by "hdd_napi_enabled(-1)"
473 * - NAPI will be on if either INI file is on and it has not been disabled
474 * by a subsequent vendor CMD,
475 * or it has been enabled by a vendor CMD.
476 * 2- Events that change the CPU affinity of a NAPI instance/IRQ:
477 * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
478 * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
479 * - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
480 * - In LO tput mode, NAPI will yield control if its interrupts to the system
481 * management functions. However in HI throughput mode, NAPI will actively
482 * manage its interrupts/instances (by trying to disperse them out to
483 * separate performance cores).
484 * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800485 *
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700486 * + In some cases (roaming peer management is the only case so far), a
487 * a client can trigger a "SERIALIZE" event. Basically, this means that the
488 * users is asking NAPI to go into a truly single execution context state.
489 * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
490 * (if called for the first time) and then moves all IRQs (for NAPI
491 * instances) to be collapsed to a single core. If called multiple times,
492 * it will just re-collapse the CPUs. This is because blacklist-on() API
493 * is reference-counted, and because the API has already been called.
494 *
495 * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go
496 * to its "normal" operation. Optionally, they can give a timeout value (in
497 * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
498 * case, NAPI will just set the current throughput state to uninitialized
499 * and set the delay period. Once policy handler is called, it would skip
500 * applying the policy delay period times, and otherwise apply the policy.
501 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800502 * Return:
503 * < 0: some error
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700504 * = 0: event handled successfully
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800505 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530506int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
Komal Seelam644263d2016-02-22 20:45:49 +0530507 void *data)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800508{
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700509 int rc = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800510 uint32_t prev_state;
511 int i;
Poddar, Siddarth0efe2892017-07-18 16:10:07 +0530512 bool state_changed;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800513 struct napi_struct *napi;
Komal Seelam644263d2016-02-22 20:45:49 +0530514 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700515 struct qca_napi_data *napid = &(hif->napi_data);
516 enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700517 enum {
518 BLACKLIST_NOT_PENDING,
519 BLACKLIST_ON_PENDING,
520 BLACKLIST_OFF_PENDING
521 } blacklist_pending = BLACKLIST_NOT_PENDING;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800522
Jeff Johnsonb9450212017-09-18 10:12:38 -0700523 NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800524
Houston Hoffmanb3497c02017-04-22 18:27:00 -0700525 if (ce_srng_based(hif))
526 return hif_exec_event(hif_ctx, event, data);
527
Orhan K AKYILDIZ0f521bf2016-11-29 19:30:04 -0800528 if ((napid->state & HIF_NAPI_INITED) == 0) {
529 NAPI_DEBUG("%s: got event when NAPI not initialized",
530 __func__);
531 return -EINVAL;
532 }
Houston Hoffman3c841052016-12-12 12:52:45 -0800533 qdf_spin_lock_bh(&(napid->lock));
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700534 prev_state = napid->state;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800535 switch (event) {
536 case NAPI_EVT_INI_FILE:
Orhan K AKYILDIZf006e932016-11-14 00:35:44 -0800537 case NAPI_EVT_CMD_STATE:
538 case NAPI_EVT_INT_STATE: {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800539 int on = (data != ((void *)0));
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800540
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800541 HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
Orhan K AKYILDIZf006e932016-11-14 00:35:44 -0800542 __func__, event,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800543 on, prev_state);
544 if (on)
545 if (prev_state & HIF_NAPI_CONF_UP) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800546 HIF_DBG("%s: duplicate NAPI conf ON msg",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800547 __func__);
548 } else {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800549 HIF_DBG("%s: setting state to ON",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800550 __func__);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700551 napid->state |= HIF_NAPI_CONF_UP;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800552 }
553 else /* off request */
554 if (prev_state & HIF_NAPI_CONF_UP) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800555 HIF_DBG("%s: setting state to OFF",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800556 __func__);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700557 napid->state &= ~HIF_NAPI_CONF_UP;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800558 } else {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800559 HIF_DBG("%s: duplicate NAPI conf OFF msg",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800560 __func__);
561 }
562 break;
563 }
564 /* case NAPI_INIT_FILE/CMD_STATE */
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700565
566 case NAPI_EVT_CPU_STATE: {
567 int cpu = ((unsigned long int)data >> 16);
568 int val = ((unsigned long int)data & 0x0ff);
569
570 NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
571 __func__, cpu, val);
572
573 /* state has already been set by hnc_cpu_notify_cb */
574 if ((val == QCA_NAPI_CPU_DOWN) &&
575 (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
576 (napid->napi_cpu[cpu].napis != 0)) {
577 NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
578 __func__, cpu);
579 rc = hif_napi_cpu_migrate(napid,
580 cpu,
581 HNC_ACT_RELOCATE);
582 napid->napi_cpu[cpu].napis = 0;
583 }
584 /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
585 break;
586 }
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700587
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700588 case NAPI_EVT_TPUT_STATE: {
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700589 tput_mode = (enum qca_napi_tput_state)data;
590 if (tput_mode == QCA_NAPI_TPUT_LO) {
591 /* from TPUT_HI -> TPUT_LO */
592 NAPI_DEBUG("%s: Moving to napi_tput_LO state",
593 __func__);
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700594 blacklist_pending = BLACKLIST_OFF_PENDING;
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700595 /*
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -0700596 * Ideally we should "collapse" interrupts here, since
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700597 * we are "dispersing" interrupts in the "else" case.
598 * This allows the possibility that our interrupts may
599 * still be on the perf cluster the next time we enter
600 * high tput mode. However, the irq_balancer is free
601 * to move our interrupts to power cluster once
602 * blacklisting has been turned off in the "else" case.
603 */
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700604 } else {
605 /* from TPUT_LO -> TPUT->HI */
606 NAPI_DEBUG("%s: Moving to napi_tput_HI state",
607 __func__);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700608 rc = hif_napi_cpu_migrate(napid,
609 HNC_ANY_CPU,
610 HNC_ACT_DISPERSE);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700611
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700612 blacklist_pending = BLACKLIST_ON_PENDING;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700613 }
614 napid->napi_mode = tput_mode;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700615 break;
616 }
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700617
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700618 case NAPI_EVT_USR_SERIAL: {
619 unsigned long users = (unsigned long)data;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700620
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700621 NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
622 __func__, users);
623
624 rc = hif_napi_cpu_migrate(napid,
625 HNC_ANY_CPU,
626 HNC_ACT_COLLAPSE);
627 if ((users == 0) && (rc == 0))
628 blacklist_pending = BLACKLIST_ON_PENDING;
629 break;
630 }
631 case NAPI_EVT_USR_NORMAL: {
632 NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
633 /*
634 * Deserialization timeout is handled at hdd layer;
635 * just mark current mode to uninitialized to ensure
636 * it will be set when the delay is over
637 */
638 napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
639 break;
640 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800641 default: {
Houston Hoffmanc50572b2016-06-08 19:49:46 -0700642 HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800643 __func__, event, (unsigned long) data);
644 break;
645 } /* default */
646 }; /* switch */
647
648
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700649 switch (blacklist_pending) {
650 case BLACKLIST_ON_PENDING:
651 /* assume the control of WLAN IRQs */
Mohit Khanna012bfe32017-01-19 21:15:35 -0800652 hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700653 break;
654 case BLACKLIST_OFF_PENDING:
655 /* yield the control of WLAN IRQs */
Mohit Khanna012bfe32017-01-19 21:15:35 -0800656 hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -0700657 break;
658 default: /* nothing to do */
659 break;
660 } /* switch blacklist_pending */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800661
Poddar, Siddarth0efe2892017-07-18 16:10:07 +0530662 /* we want to perform the comparison in lock:
663 * there is a possiblity of hif_napi_event get called
664 * from two different contexts (driver unload and cpu hotplug
665 * notification) and napid->state get changed
666 * in driver unload context and can lead to race condition
667 * in cpu hotplug context. Therefore, perform the napid->state
668 * comparison before releasing lock.
669 */
670 state_changed = (prev_state != napid->state);
Mohit Khanna012bfe32017-01-19 21:15:35 -0800671 qdf_spin_unlock_bh(&(napid->lock));
672
Poddar, Siddarth0efe2892017-07-18 16:10:07 +0530673 if (state_changed) {
Orhan K AKYILDIZf006e932016-11-14 00:35:44 -0800674 if (napid->state == ENABLE_NAPI_MASK) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800675 rc = 1;
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800676 for (i = 0; i < CE_COUNT_MAX; i++) {
677 struct qca_napi_info *napii = napid->napis[i];
678 if (napii) {
679 napi = &(napii->napi);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700680 NAPI_DEBUG("%s: enabling NAPI %d",
681 __func__, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800682 napi_enable(napi);
683 }
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800684 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800685 } else {
686 rc = 0;
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800687 for (i = 0; i < CE_COUNT_MAX; i++) {
688 struct qca_napi_info *napii = napid->napis[i];
689 if (napii) {
690 napi = &(napii->napi);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700691 NAPI_DEBUG("%s: disabling NAPI %d",
692 __func__, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800693 napi_disable(napi);
Orhan K AKYILDIZf006e932016-11-14 00:35:44 -0800694 /* in case it is affined, remove it */
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800695 irq_set_affinity_hint(napii->irq, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800696 }
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800697 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800698 }
699 } else {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -0800700 HIF_DBG("%s: no change in hif napi state (still %d)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800701 __func__, prev_state);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800702 }
703
Houston Hoffman56936832016-03-16 12:16:24 -0700704 NAPI_DEBUG("<--[rc=%d]", rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800705 return rc;
706}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530707qdf_export_symbol(hif_napi_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800708
709/**
710 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
711 * @hif: hif context
712 * @ce : CE instance (or -1, to check if any CEs are enabled)
713 *
714 * Return: bool
715 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530716int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800717{
718 int rc;
Komal Seelam644263d2016-02-22 20:45:49 +0530719 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800720
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800721 if (-1 == ce)
722 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
723 else
724 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
725 (hif->napi_data.ce_map & (0x01 << ce)));
726 return rc;
Pratik Gandhidc82a772018-01-30 18:57:05 +0530727}
728qdf_export_symbol(hif_napi_enabled);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800729
730/**
Sathish Kumar2318e0f2018-06-15 15:49:46 +0530731 * hif_napi_created() - checks whether NAPI is created for given ce or not
732 * @hif: hif context
733 * @ce : CE instance
734 *
735 * Return: bool
736 */
737bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
738{
739 int rc;
740 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
741
742 rc = (hif->napi_data.ce_map & (0x01 << ce));
743
744 return !!rc;
745}
746qdf_export_symbol(hif_napi_created);
747
748/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800749 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
750 *
751 * @hif: hif context
752 * @id : id of NAPI instance calling this (used to determine the CE)
753 *
754 * Return: void
755 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530756inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800757{
Komal Seelam644263d2016-02-22 20:45:49 +0530758 struct hif_softc *scn = HIF_GET_SOFTC(hif);
759
Houston Hoffman8f239f62016-03-14 21:12:05 -0700760 hif_irq_enable(scn, NAPI_ID2PIPE(id));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800761}
762
763
764/**
765 * hif_napi_schedule() - schedules napi, updates stats
766 * @scn: hif context
767 * @ce_id: index of napi instance
768 *
Will Huangd6c3b872018-06-26 10:53:59 +0800769 * Return: false if napi didn't enable or already scheduled, otherwise true
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800770 */
Will Huangd6c3b872018-06-26 10:53:59 +0800771bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800772{
773 int cpu = smp_processor_id();
Komal Seelam644263d2016-02-22 20:45:49 +0530774 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800775 struct qca_napi_info *napii;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800776
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800777 napii = scn->napi_data.napis[ce_id];
778 if (qdf_unlikely(!napii)) {
779 HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
780 __func__, ce_id);
781 qdf_atomic_dec(&scn->active_tasklet_cnt);
782 return false;
783 }
784
Will Huangd6c3b872018-06-26 10:53:59 +0800785 if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
786 NAPI_DEBUG("napi scheduled, return");
787 qdf_atomic_dec(&scn->active_tasklet_cnt);
788 return false;
789 }
790
791 hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
792 NULL, NULL, 0, 0);
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800793 napii->stats[cpu].napi_schedules++;
794 NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
795 napi_schedule(&(napii->napi));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800796
797 return true;
798}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530799qdf_export_symbol(hif_napi_schedule);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800800
801/**
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700802 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
803 * @napi_info: pointer to qca_napi_info for the napi instance
804 *
805 * Return: true => interrupt already on correct cpu, no correction needed
806 * false => interrupt on wrong cpu, correction done for cpu affinity
807 * of the interrupt
808 */
809static inline
810bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
811{
812 bool right_cpu = true;
813 int rc = 0;
814 cpumask_t cpumask;
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -0700815 int cpu;
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700816 struct qca_napi_data *napid;
817
818 napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
819
820 if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
821
822 cpu = qdf_get_cpu();
Mohit Khanna012bfe32017-01-19 21:15:35 -0800823 if (unlikely((hif_napi_cpu_blacklist(napid,
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700824 BLACKLIST_QUERY) > 0) &&
825 (cpu != napi_info->cpu))) {
826 right_cpu = false;
827
828 NAPI_DEBUG("interrupt on wrong CPU, correcting");
829 cpumask.bits[0] = (0x01 << napi_info->cpu);
Mohit Khanna012bfe32017-01-19 21:15:35 -0800830
831 irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700832 rc = irq_set_affinity_hint(napi_info->irq,
833 &cpumask);
Mohit Khanna012bfe32017-01-19 21:15:35 -0800834 irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
835
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700836 if (rc)
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -0700837 HIF_ERROR("error setting irq affinity hint: %d",
838 rc);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700839 else
840 napi_info->stats[cpu].cpu_corrected++;
841 }
842 }
843 return right_cpu;
844}
845
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700846#ifdef RECEIVE_OFFLOAD
847/**
848 * hif_napi_offld_flush_cb() - Call upper layer flush callback
849 * @napi_info: Handle to hif_napi_info
850 *
851 * Return: None
852 */
853static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
854{
855 if (napi_info->offld_flush_cb)
856 napi_info->offld_flush_cb(napi_info);
857}
858#else
859static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
860{
861}
862#endif
863
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700864/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800865 * hif_napi_poll() - NAPI poll routine
866 * @napi : pointer to NAPI struct as kernel holds it
867 * @budget:
868 *
869 * This is the body of the poll function.
870 * The poll function is called by kernel. So, there is a wrapper
871 * function in HDD, which in turn calls this function.
872 * Two main reasons why the whole thing is not implemented in HDD:
873 * a) references to things like ce_service that HDD is not aware of
874 * b) proximity to the implementation of ce_tasklet, which the body
875 * of this function should be very close to.
876 *
877 * NOTE TO THE MAINTAINER:
878 * Consider this function and ce_tasklet very tightly coupled pairs.
879 * Any changes to ce_tasklet or this function may likely need to be
880 * reflected in the counterpart.
881 *
882 * Returns:
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700883 * int: the amount of work done in this poll (<= budget)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800884 */
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700885int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
886 struct napi_struct *napi,
887 int budget)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800888{
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800889 int rc = 0; /* default: no work done, also takes care of error */
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700890 int normalized = 0;
891 int bucket;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800892 int cpu = smp_processor_id();
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700893 bool poll_on_right_cpu;
Komal Seelam644263d2016-02-22 20:45:49 +0530894 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800895 struct qca_napi_info *napi_info;
Houston Hoffmaneb2516c2016-04-01 12:53:50 -0700896 struct CE_state *ce_state = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800897
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700898 if (unlikely(NULL == hif)) {
899 HIF_ERROR("%s: hif context is NULL", __func__);
900 QDF_ASSERT(0);
901 goto out;
902 }
903
904 napi_info = (struct qca_napi_info *)
905 container_of(napi, struct qca_napi_info, napi);
906
Houston Hoffmaneab19b32017-03-08 15:57:54 -0800907 NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
908 __func__, napi_info->id, napi_info->irq, budget);
909
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800910 napi_info->stats[cpu].napi_polls++;
911
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700912 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
c_cgodavfda96ad2017-09-07 16:16:00 +0530913 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700914
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700915 rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
916 NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
917 __func__, rc);
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700918
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700919 hif_napi_offld_flush_cb(napi_info);
Dhanashri Atre8d978172015-10-30 15:12:03 -0700920
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800921 /* do not return 0, if there was some work done,
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800922 * even if it is below the scale
923 */
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700924 if (rc) {
925 napi_info->stats[cpu].napi_workdone += rc;
926 normalized = (rc / napi_info->scale);
927 if (normalized == 0)
928 normalized++;
929 bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
930 if (bucket >= QCA_NAPI_NUM_BUCKETS) {
931 bucket = QCA_NAPI_NUM_BUCKETS - 1;
932 HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
933 bucket, QCA_NAPI_NUM_BUCKETS);
934 }
935 napi_info->stats[cpu].napi_budget_uses[bucket]++;
936 } else {
Orhan K AKYILDIZc4094612015-11-11 18:01:15 -0800937 /* if ce_per engine reports 0, then poll should be terminated */
Houston Hoffman56936832016-03-16 12:16:24 -0700938 NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800939 __func__, __LINE__);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700940 }
941
942 ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
943
944 /*
945 * Not using the API hif_napi_correct_cpu directly in the if statement
946 * below since the API may not get evaluated if put at the end if any
947 * prior condition would evaluate to be true. The CPU correction
948 * check should kick in every poll.
949 */
Nandha Kishore Easwarancbff9c92016-11-24 13:31:19 +0530950#ifdef NAPI_YIELD_BUDGET_BASED
951 if (ce_state && (ce_state->force_break || 0 == rc)) {
952#else
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700953 poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
954 if ((ce_state) &&
955 (!ce_check_rx_pending(ce_state) || (0 == rc) ||
956 !poll_on_right_cpu)) {
Nandha Kishore Easwarancbff9c92016-11-24 13:31:19 +0530957#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800958 napi_info->stats[cpu].napi_completes++;
Nandha Kishore Easwaran858a7692016-11-29 11:01:15 +0530959#ifdef NAPI_YIELD_BUDGET_BASED
960 ce_state->force_break = 0;
961#endif
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700962
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700963 hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
c_cgodavfda96ad2017-09-07 16:16:00 +0530964 NULL, NULL, 0, 0);
Houston Hoffmana757eda2016-05-12 21:19:50 -0700965 if (normalized >= budget)
966 normalized = budget - 1;
967
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800968 napi_complete(napi);
Manjunathappa Prakash56023f52018-03-28 20:05:56 -0700969 /* enable interrupts */
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -0700970 hif_napi_enable_irq(hif_ctx, napi_info->id);
971 /* support suspend/resume */
972 qdf_atomic_dec(&(hif->active_tasklet_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800973
Houston Hoffman56936832016-03-16 12:16:24 -0700974 NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800975 __func__, __LINE__);
Houston Hoffmana757eda2016-05-12 21:19:50 -0700976 } else {
977 /* 4.4 kernel NAPI implementation requires drivers to
978 * return full work when they ask to be re-scheduled,
979 * or napi_complete and re-start with a fresh interrupt
980 */
981 normalized = budget;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800982 }
983
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700984 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
c_cgodavfda96ad2017-09-07 16:16:00 +0530985 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700986
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700987 NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800988 return normalized;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700989out:
990 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800991}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530992qdf_export_symbol(hif_napi_poll);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -0700993
Himanshu Agarwald9d0e522017-05-23 11:06:12 +0530994void hif_update_napi_max_poll_time(struct CE_state *ce_state,
Kiran Venkatappad4a55e02018-05-24 22:34:46 +0530995 int ce_id,
Himanshu Agarwald9d0e522017-05-23 11:06:12 +0530996 int cpu_id)
997{
Kiran Venkatappad4a55e02018-05-24 22:34:46 +0530998 struct hif_softc *hif;
999 struct qca_napi_info *napi_info;
Himanshu Agarwald9d0e522017-05-23 11:06:12 +05301000 unsigned long long napi_poll_time = sched_clock() -
1001 ce_state->ce_service_start_time;
1002
Kiran Venkatappad4a55e02018-05-24 22:34:46 +05301003 hif = ce_state->scn;
1004 napi_info = hif->napi_data.napis[ce_id];
Himanshu Agarwald9d0e522017-05-23 11:06:12 +05301005 if (napi_poll_time >
1006 napi_info->stats[cpu_id].napi_max_poll_time)
1007 napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
1008}
1009
Houston Hoffmanb3497c02017-04-22 18:27:00 -07001010#ifdef HIF_IRQ_AFFINITY
Mohit Khanna518eb502016-10-06 19:58:02 -07001011/**
1012 *
1013 * hif_napi_update_yield_stats() - update NAPI yield related stats
1014 * @cpu_id: CPU ID for which stats needs to be updates
1015 * @ce_id: Copy Engine ID for which yield stats needs to be updates
1016 * @time_limit_reached: indicates whether the time limit was reached
1017 * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
1018 *
1019 * Return: None
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001020 */
Mohit Khanna518eb502016-10-06 19:58:02 -07001021void hif_napi_update_yield_stats(struct CE_state *ce_state,
1022 bool time_limit_reached,
1023 bool rxpkt_thresh_reached)
1024{
1025 struct hif_softc *hif;
1026 struct qca_napi_data *napi_data = NULL;
1027 int ce_id = 0;
1028 int cpu_id = 0;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001029
Mohit Khanna518eb502016-10-06 19:58:02 -07001030 if (unlikely(NULL == ce_state)) {
1031 QDF_ASSERT(NULL != ce_state);
1032 return;
1033 }
1034
1035 hif = ce_state->scn;
1036
1037 if (unlikely(NULL == hif)) {
1038 QDF_ASSERT(NULL != hif);
1039 return;
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001040 }
1041 napi_data = &(hif->napi_data);
1042 if (unlikely(NULL == napi_data)) {
1043 QDF_ASSERT(NULL != napi_data);
1044 return;
Mohit Khanna518eb502016-10-06 19:58:02 -07001045 }
1046
1047 ce_id = ce_state->id;
1048 cpu_id = qdf_get_cpu();
1049
Himanshu Agarwald9d0e522017-05-23 11:06:12 +05301050 if (unlikely(!napi_data->napis[ce_id])) {
1051 HIF_INFO("%s: NAPI info is NULL for ce id: %d",
1052 __func__, ce_id);
1053 return;
1054 }
1055
Mohit Khanna518eb502016-10-06 19:58:02 -07001056 if (time_limit_reached)
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001057 napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
Mohit Khanna518eb502016-10-06 19:58:02 -07001058 else
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001059 napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
Himanshu Agarwald9d0e522017-05-23 11:06:12 +05301060
Kiran Venkatappad4a55e02018-05-24 22:34:46 +05301061 hif_update_napi_max_poll_time(ce_state, ce_id,
Himanshu Agarwald9d0e522017-05-23 11:06:12 +05301062 cpu_id);
Mohit Khanna518eb502016-10-06 19:58:02 -07001063}
1064
1065/**
1066 *
1067 * hif_napi_stats() - display NAPI CPU statistics
1068 * @napid: pointer to qca_napi_data
1069 *
1070 * Description:
1071 * Prints the various CPU cores on which the NAPI instances /CEs interrupts
1072 * are being executed. Can be called from outside NAPI layer.
1073 *
1074 * Return: None
1075 */
1076void hif_napi_stats(struct qca_napi_data *napid)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001077{
1078 int i;
Mohit Khanna518eb502016-10-06 19:58:02 -07001079 struct qca_napi_cpu *cpu;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001080
Mohit Khanna518eb502016-10-06 19:58:02 -07001081 if (napid == NULL) {
Houston Hoffman5645dd22017-08-10 15:19:13 -07001082 qdf_debug("%s: napiid struct is null", __func__);
Mohit Khanna518eb502016-10-06 19:58:02 -07001083 return;
1084 }
1085
1086 cpu = napid->napi_cpu;
Houston Hoffman5645dd22017-08-10 15:19:13 -07001087 qdf_debug("NAPI CPU TABLE");
1088 qdf_debug("lilclhead=%d, bigclhead=%d",
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001089 napid->lilcl_head, napid->bigcl_head);
1090 for (i = 0; i < NR_CPUS; i++) {
Houston Hoffman5645dd22017-08-10 15:19:13 -07001091 qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001092 i,
1093 cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1094 cpu[i].core_mask.bits[0],
1095 cpu[i].thread_mask.bits[0],
Govind Singh861d5da2016-09-27 23:04:17 +05301096 cpu[i].max_freq, cpu[i].napis,
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001097 cpu[i].cluster_nxt);
1098 }
Mohit Khanna518eb502016-10-06 19:58:02 -07001099}
1100
#ifdef FEATURE_NAPI_DEBUG
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */
/* debug helper: dump the NAPI cpu table via hif_napi_stats() */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	hif_napi_stats(napid);
}
#else
static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
#endif /* FEATURE_NAPI_DEBUG */
1113/**
1114 * hnc_link_clusters() - partitions to cpu table into clusters
1115 * @napid: pointer to NAPI data
1116 *
1117 * Takes in a CPU topology table and builds two linked lists
1118 * (big cluster cores, list-head at bigcl_head, and little cluster
1119 * cores, list-head at lilcl_head) out of it.
1120 *
1121 * If there are more than two clusters:
1122 * - bigcl_head and lilcl_head will be different,
1123 * - the cluster with highest cpufreq will be considered the "big" cluster.
1124 * If there are more than one with the highest frequency, the *last* of such
1125 * clusters will be designated as the "big cluster"
1126 * - the cluster with lowest cpufreq will be considered the "li'l" cluster.
1127 * If there are more than one clusters with the lowest cpu freq, the *first*
1128 * of such clusters will be designated as the "little cluster"
1129 * - We only support up to 32 clusters
1130 * Return: 0 : OK
1131 * !0: error (at least one of lil/big clusters could not be found)
1132 */
1133#define HNC_MIN_CLUSTER 0
Manjunathappa Prakash617ff242018-05-29 19:17:25 -07001134#define HNC_MAX_CLUSTER 1
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001135static int hnc_link_clusters(struct qca_napi_data *napid)
1136{
1137 int rc = 0;
1138
1139 int i;
1140 int it = 0;
1141 uint32_t cl_done = 0x0;
Sarada Prasanna Garnayak8d9eba12016-11-04 18:36:19 +05301142 int cl, curcl, curclhead = 0;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001143 int more;
1144 unsigned int lilfrq = INT_MAX;
1145 unsigned int bigfrq = 0;
Sarada Prasanna Garnayak8d9eba12016-11-04 18:36:19 +05301146 unsigned int clfrq = 0;
1147 int prev = 0;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001148 struct qca_napi_cpu *cpus = napid->napi_cpu;
1149
1150 napid->lilcl_head = napid->bigcl_head = -1;
1151
1152 do {
1153 more = 0;
1154 it++; curcl = -1;
1155 for (i = 0; i < NR_CPUS; i++) {
1156 cl = cpus[i].cluster_id;
1157 NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1158 i, cl);
1159 if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1160 NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001161 /* continue if ASSERTs are disabled */
1162 continue;
1163 };
1164 if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1165 NAPI_DEBUG("Core mask 0. SKIPPED\n");
1166 continue;
1167 }
1168 if (cl_done & (0x01 << cl)) {
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001169 NAPI_DEBUG("Cluster already processed. SKIPPED\n");
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001170 continue;
1171 } else {
1172 if (more == 0) {
1173 more = 1;
1174 curcl = cl;
1175 curclhead = i; /* row */
1176 clfrq = cpus[i].max_freq;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001177 prev = -1;
1178 };
1179 if ((curcl >= 0) && (curcl != cl)) {
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001180 NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001181 cl, curcl);
1182 continue;
1183 }
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001184 if (cpus[i].max_freq != clfrq)
1185 NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1186 cpus[i].max_freq, clfrq);
1187 if (clfrq >= bigfrq) {
1188 bigfrq = clfrq;
1189 napid->bigcl_head = curclhead;
1190 NAPI_DEBUG("bigcl=%d\n", curclhead);
1191 }
1192 if (clfrq < lilfrq) {
1193 lilfrq = clfrq;
1194 napid->lilcl_head = curclhead;
1195 NAPI_DEBUG("lilcl=%d\n", curclhead);
1196 }
1197 if (prev != -1)
1198 cpus[prev].cluster_nxt = i;
1199
1200 prev = i;
1201 }
1202 }
1203 if (curcl >= 0)
1204 cl_done |= (0x01 << curcl);
1205
1206 } while (more);
1207
1208 if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1209 rc = -EFAULT;
1210
1211 hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1212 return rc;
1213}
1214#undef HNC_MIN_CLUSTER
1215#undef HNC_MAX_CLUSTER
1216
1217/*
1218 * hotplug function group
1219 */
1220
1221/**
Dustin Brown5d0d1042017-10-24 16:10:23 -07001222 * hnc_cpu_online_cb() - handles CPU hotplug "up" events
1223 * @context: the associated HIF context
1224 * @cpu: the CPU Id of the CPU the event happened on
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001225 *
Dustin Brown5d0d1042017-10-24 16:10:23 -07001226 * Return: None
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001227 */
Dustin Brown5d0d1042017-10-24 16:10:23 -07001228static void hnc_cpu_online_cb(void *context, uint32_t cpu)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001229{
Dustin Brown5d0d1042017-10-24 16:10:23 -07001230 struct hif_softc *hif = context;
1231 struct qca_napi_data *napid = &hif->napi_data;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001232
Dustin Brown5d0d1042017-10-24 16:10:23 -07001233 if (cpu >= NR_CPUS)
1234 return;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001235
Dustin Brown5d0d1042017-10-24 16:10:23 -07001236 NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001237
Dustin Brown5d0d1042017-10-24 16:10:23 -07001238 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1239 NAPI_DEBUG("%s: CPU %u marked %d",
1240 __func__, cpu, napid->napi_cpu[cpu].state);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001241
Dustin Brown5d0d1042017-10-24 16:10:23 -07001242 NAPI_DEBUG("<--%s", __func__);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001243}
1244
1245/**
Dustin Brown5d0d1042017-10-24 16:10:23 -07001246 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
1247 * @context: the associated HIF context
1248 * @cpu: the CPU Id of the CPU the event happened on
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001249 *
Dustin Brown5d0d1042017-10-24 16:10:23 -07001250 * On transtion to offline, we act on PREP events, because we may need to move
1251 * the irqs/NAPIs to another CPU before it is actually off-lined.
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001252 *
Dustin Brown5d0d1042017-10-24 16:10:23 -07001253 * Return: None
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001254 */
Dustin Brown5d0d1042017-10-24 16:10:23 -07001255static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001256{
Dustin Brown5d0d1042017-10-24 16:10:23 -07001257 struct hif_softc *hif = context;
1258 struct qca_napi_data *napid = &hif->napi_data;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001259
Dustin Brown5d0d1042017-10-24 16:10:23 -07001260 if (cpu >= NR_CPUS)
1261 return;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001262
Dustin Brown5d0d1042017-10-24 16:10:23 -07001263 NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001264
Dustin Brown5d0d1042017-10-24 16:10:23 -07001265 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1266
1267 NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
1268 __func__, cpu, napid->napi_cpu[cpu].state);
1269
1270 /**
1271 * we need to move any NAPIs on this CPU out.
1272 * if we are in LO throughput mode, then this is valid
1273 * if the CPU is the the low designated CPU.
1274 */
1275 hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
1276 NAPI_EVT_CPU_STATE,
1277 (void *)
1278 ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
1279
1280 NAPI_DEBUG("<--%s", __func__);
1281}
1282
1283static int hnc_hotplug_register(struct hif_softc *hif_sc)
1284{
1285 QDF_STATUS status;
1286
1287 NAPI_DEBUG("-->%s", __func__);
1288
1289 status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
1290 hif_sc,
1291 hnc_cpu_online_cb,
1292 hnc_cpu_before_offline_cb);
1293
1294 NAPI_DEBUG("<--%s [%d]", __func__, status);
1295
1296 return qdf_status_to_os_return(status);
1297}
1298
1299static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
1300{
1301 NAPI_DEBUG("-->%s", __func__);
1302
1303 if (hif_sc->napi_data.cpuhp_handler)
1304 qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
1305
1306 NAPI_DEBUG("<--%s", __func__);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001307}
1308
1309/**
1310 * hnc_install_tput() - installs a callback in the throughput detector
1311 * @register: !0 => register; =0: unregister
1312 *
1313 * installs a callback to be called when wifi driver throughput (tx+rx)
1314 * crosses a threshold. Currently, we are using the same criteria as
1315 * TCP ack suppression (500 packets/100ms by default).
1316 *
1317 * Return: 0 : success
1318 * <0: failure
1319 */
1320
1321static int hnc_tput_hook(int install)
1322{
1323 int rc = 0;
1324
1325 /*
1326 * Nothing, until the bw_calculation accepts registration
1327 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
1328 * hdd_napi_throughput_policy(...)
1329 */
1330 return rc;
1331}
1332
1333/*
1334 * Implementation of hif_napi_cpu API
1335 */
1336
Houston Hoffmanb3497c02017-04-22 18:27:00 -07001337#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1338static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1339{
1340 cpumask_copy(&(cpus[i].thread_mask),
1341 topology_sibling_cpumask(i));
1342}
1343#else
1344static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1345{
1346}
1347#endif
1348
1349
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001350/**
1351 * hif_napi_cpu_init() - initialization of irq affinity block
1352 * @ctx: pointer to qca_napi_data
1353 *
1354 * called by hif_napi_create, after the first instance is called
1355 * - builds napi_rss_cpus table from cpu topology
1356 * - links cores of the same clusters together
1357 * - installs hot-plug notifier
1358 * - installs throughput trigger notifier (when such mechanism exists)
1359 *
1360 * Return: 0: OK
1361 * <0: error code
1362 */
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001363int hif_napi_cpu_init(struct hif_opaque_softc *hif)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001364{
1365 int rc = 0;
1366 int i;
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001367 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001368 struct qca_napi_cpu *cpus = napid->napi_cpu;
1369
1370 NAPI_DEBUG("--> ");
1371
1372 if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1373 NAPI_DEBUG("NAPI RSS table already initialized.\n");
1374 rc = -EALREADY;
1375 goto lab_rss_init;
1376 }
1377
1378 /* build CPU topology table */
1379 for_each_possible_cpu(i) {
1380 cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
1381 ? QCA_NAPI_CPU_UP
1382 : QCA_NAPI_CPU_DOWN));
1383 cpus[i].core_id = topology_core_id(i);
1384 cpus[i].cluster_id = topology_physical_package_id(i);
1385 cpumask_copy(&(cpus[i].core_mask),
1386 topology_core_cpumask(i));
Houston Hoffmanb3497c02017-04-22 18:27:00 -07001387 record_sibling_cpumask(cpus, i);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001388 cpus[i].max_freq = cpufreq_quick_get_max(i);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001389 cpus[i].napis = 0x0;
1390 cpus[i].cluster_nxt = -1; /* invalid */
1391 }
1392
1393 /* link clusters together */
1394 rc = hnc_link_clusters(napid);
1395 if (0 != rc)
1396 goto lab_err_topology;
1397
1398 /* install hotplug notifier */
Dustin Brown5d0d1042017-10-24 16:10:23 -07001399 rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001400 if (0 != rc)
1401 goto lab_err_hotplug;
1402
1403 /* install throughput notifier */
1404 rc = hnc_tput_hook(1);
1405 if (0 == rc)
1406 goto lab_rss_init;
1407
1408lab_err_hotplug:
1409 hnc_tput_hook(0);
Dustin Brown5d0d1042017-10-24 16:10:23 -07001410 hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001411lab_err_topology:
Orhan K AKYILDIZ84deb972016-10-25 12:54:21 -07001412 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001413lab_rss_init:
1414 NAPI_DEBUG("<-- [rc=%d]", rc);
1415 return rc;
1416}
1417
/**
 * hif_napi_cpu_deinit() - clean-up of irq affinity block
 * @hif: opaque HIF context handle
 *
 * called by hif_napi_destroy, when the last instance is removed
 * - uninstalls throughput and hotplug notifiers
 * - clears cpu topology table
 * Return: 0: OK
 */
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001426int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001427{
1428 int rc = 0;
Houston Hoffmanfec8ed12016-11-15 10:42:27 -08001429 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001430
1431 NAPI_DEBUG("-->%s(...)", __func__);
1432
1433 /* uninstall tput notifier */
1434 rc = hnc_tput_hook(0);
1435
1436 /* uninstall hotplug notifier */
Dustin Brown5d0d1042017-10-24 16:10:23 -07001437 hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001438
1439 /* clear the topology table */
Orhan K AKYILDIZ84deb972016-10-25 12:54:21 -07001440 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001441
1442 NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1443
1444 return rc;
1445}
1446
1447/**
1448 * hncm_migrate_to() - migrates a NAPI to a CPU
1449 * @napid: pointer to NAPI block
1450 * @ce_id: CE_id of the NAPI instance
1451 * @didx : index in the CPU topology table for the CPU to migrate to
1452 *
1453 * Migrates NAPI (identified by the CE_id) to the destination core
1454 * Updates the napi_map of the destination entry
1455 *
1456 * Return:
1457 * =0 : success
1458 * <0 : error
1459 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001460static int hncm_migrate_to(struct qca_napi_data *napid,
1461 int napi_ce,
1462 int didx)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001463{
1464 int rc = 0;
1465 cpumask_t cpumask;
1466
1467 NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1468
Mohit Khanna865d8ff2016-10-06 19:58:02 -07001469 cpumask.bits[0] = (1 << didx);
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001470 if (!napid->napis[napi_ce])
1471 return -EINVAL;
Mohit Khanna012bfe32017-01-19 21:15:35 -08001472
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001473 irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
1474 rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, &cpumask);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001475
1476 /* unmark the napis bitmap in the cpu table */
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001477 napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001478 /* mark the napis bitmap for the new designated cpu */
1479 napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001480 napid->napis[napi_ce]->cpu = didx;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001481
1482 NAPI_DEBUG("<--%s[%d]", __func__, rc);
1483 return rc;
1484}
1485/**
1486 * hncm_dest_cpu() - finds a destination CPU for NAPI
1487 * @napid: pointer to NAPI block
1488 * @act : RELOCATE | COLLAPSE | DISPERSE
1489 *
 * Finds the designated destination for the next IRQ.
1491 * RELOCATE: translated to either COLLAPSE or DISPERSE based
1492 * on napid->napi_mode (throughput state)
1493 * COLLAPSE: All have the same destination: the first online CPU in lilcl
1494 * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1495 * NAPIs on it
1496 *
1497 * Return: >=0 : index in the cpu topology table
1498 * : < 0 : error
1499 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001500static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001501{
1502 int destidx = -1;
1503 int head, i;
1504
1505 NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1506 if (act == HNC_ACT_RELOCATE) {
1507 if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1508 act = HNC_ACT_COLLAPSE;
1509 else
1510 act = HNC_ACT_DISPERSE;
1511 NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1512 __func__, act);
1513 }
1514 if (act == HNC_ACT_COLLAPSE) {
1515 head = i = napid->lilcl_head;
1516retry_collapse:
1517 while (i >= 0) {
1518 if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1519 destidx = i;
1520 break;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001521 }
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001522 i = napid->napi_cpu[i].cluster_nxt;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001523 }
1524 if ((destidx < 0) && (head == napid->lilcl_head)) {
1525 NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1526 __func__);
1527 head = i = napid->bigcl_head;
1528 goto retry_collapse;
1529 }
1530 } else { /* HNC_ACT_DISPERSE */
1531 int smallest = 99; /* all 32 bits full */
1532 int smallidx = -1;
1533
1534 head = i = napid->bigcl_head;
1535retry_disperse:
1536 while (i >= 0) {
1537 if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
Orhan K AKYILDIZ6d9b7572016-09-25 21:31:50 -07001538 (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001539 smallest = napid->napi_cpu[i].napis;
1540 smallidx = i;
1541 }
1542 i = napid->napi_cpu[i].cluster_nxt;
1543 }
Manjunathappa Prakash617ff242018-05-29 19:17:25 -07001544 /* Check if matches with user sepecified CPU mask */
1545 smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ?
1546 smallidx : -1;
1547
1548 if ((smallidx < 0) && (head == napid->bigcl_head)) {
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001549 NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1550 __func__);
1551 head = i = napid->lilcl_head;
1552 goto retry_disperse;
1553 }
Manjunathappa Prakash617ff242018-05-29 19:17:25 -07001554 destidx = smallidx;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001555 }
1556 NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1557 return destidx;
1558}
/**
 * hif_napi_cpu_migrate() - migrate IRQs away
 * @napid: pointer to NAPI block
 * @cpu: -1 (HNC_ANY_CPU): all CPUs; >=0: that specific CPU only
 * @action: RELOCATE | COLLAPSE | DISPERSE
 *
 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
 * cores. Eligible cores are:
 * action=COLLAPSE -> the first online core of the little cluster
 * action=DISPERSE -> separate cores of the big cluster, so that each core will
 *                    host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis)
 *
 * Note that this function is called with a spinlock acquired already.
 *
 * Return: =0: success
 *         <0: error
 */

1576int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1577{
1578 int rc = 0;
1579 struct qca_napi_cpu *cpup;
1580 int i, dind;
1581 uint32_t napis;
1582
1583 NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1584 __func__, cpu, action);
1585 /* the following is really: hif_napi_enabled() with less overhead */
1586 if (napid->ce_map == 0) {
1587 NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1588 goto hncm_return;
1589 }
1590
1591 cpup = napid->napi_cpu;
1592
1593 switch (action) {
1594 case HNC_ACT_RELOCATE:
1595 case HNC_ACT_DISPERSE:
1596 case HNC_ACT_COLLAPSE: {
1597 /* first find the src napi set */
1598 if (cpu == HNC_ANY_CPU)
1599 napis = napid->ce_map;
1600 else
1601 napis = cpup[cpu].napis;
1602 /* then clear the napi bitmap on each CPU */
1603 for (i = 0; i < NR_CPUS; i++)
1604 cpup[i].napis = 0;
1605 /* then for each of the NAPIs to disperse: */
1606 for (i = 0; i < CE_COUNT_MAX; i++)
1607 if (napis & (1 << i)) {
1608 /* find a destination CPU */
1609 dind = hncm_dest_cpu(napid, action);
1610 if (dind >= 0) {
1611 NAPI_DEBUG("Migrating NAPI ce%d to %d",
1612 i, dind);
1613 rc = hncm_migrate_to(napid, i, dind);
1614 } else {
1615 NAPI_DEBUG("No dest for NAPI ce%d", i);
1616 hnc_dump_cpus(napid);
1617 rc = -1;
1618 }
1619 }
1620 break;
1621 }
1622 default: {
1623 NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1624 QDF_BUG(0);
1625 break;
1626 }
1627 } /* switch action */
1628
1629hncm_return:
1630 hnc_dump_cpus(napid);
1631 return rc;
1632}
1633
Mohit Khanna012bfe32017-01-19 21:15:35 -08001634
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001635/**
Mohit Khanna012bfe32017-01-19 21:15:35 -08001636 * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1637 * @napid: pointer to qca_napi_data structure
1638 * @bl_flag: blacklist flag to enable/disable blacklisting
1639 *
1640 * The function enables/disables blacklisting for all the copy engine
1641 * interrupts on which NAPI is enabled.
1642 *
1643 * Return: None
1644 */
1645static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1646{
1647 int i;
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001648 struct qca_napi_info *napii;
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001649
Mohit Khanna012bfe32017-01-19 21:15:35 -08001650 for (i = 0; i < CE_COUNT_MAX; i++) {
1651 /* check if NAPI is enabled on the CE */
1652 if (!(napid->ce_map & (0x01 << i)))
1653 continue;
1654
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001655 /*double check that NAPI is allocated for the CE */
1656 napii = napid->napis[i];
1657 if (!(napii))
1658 continue;
1659
Mohit Khanna012bfe32017-01-19 21:15:35 -08001660 if (bl_flag == true)
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001661 irq_modify_status(napii->irq,
Mohit Khanna012bfe32017-01-19 21:15:35 -08001662 0, IRQ_NO_BALANCING);
1663 else
Houston Hoffmaneab19b32017-03-08 15:57:54 -08001664 irq_modify_status(napii->irq,
Mohit Khanna012bfe32017-01-19 21:15:35 -08001665 IRQ_NO_BALANCING, 0);
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001666 HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
Mohit Khanna012bfe32017-01-19 21:15:35 -08001667 }
1668}
1669
#ifdef CONFIG_SCHED_CORE_CTL
/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
/**
 * hif_napi_core_ctl_set_boost() - request scheduler core-control boost
 * @boost: true to enable the boost, false to disable it
 *
 * Thin wrapper over the kernel's core_ctl_set_boost(), compiled in only
 * when CONFIG_SCHED_CORE_CTL is available.
 *
 * Return: value returned by core_ctl_set_boost()
 */
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return core_ctl_set_boost(boost);
}
#else
/* Stub for kernels without CONFIG_SCHED_CORE_CTL: always reports success. */
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif
Mohit Khanna012bfe32017-01-19 21:15:35 -08001682/**
1683 * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1684 * @napid: pointer to qca_napi_data structure
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001685 * @op: blacklist operation to perform
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001686 *
Mohit Khanna012bfe32017-01-19 21:15:35 -08001687 * The function enables/disables/queries blacklisting for all CE RX
1688 * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1689 * core_ctl_set_boost.
1690 * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1691 * balancer.
1692 *
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001693 * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled
1694 * for BLACKLIST_QUERY op - blacklist refcount
Mohit Khanna012bfe32017-01-19 21:15:35 -08001695 * for BLACKLIST_ON op - return value from core_ctl_set_boost API
1696 * for BLACKLIST_OFF op - return value from core_ctl_set_boost API
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001697 */
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07001698int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1699 enum qca_blacklist_op op)
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001700{
1701 int rc = 0;
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -07001702 static int ref_count; /* = 0 by the compiler */
Mohit Khanna012bfe32017-01-19 21:15:35 -08001703 uint8_t flags = napid->flags;
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001704 bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1705 bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001706
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001707 NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1708
1709 if (!(bl_en && ccb_en)) {
1710 rc = -EINVAL;
1711 goto out;
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -07001712 }
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001713
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001714 switch (op) {
1715 case BLACKLIST_QUERY:
1716 rc = ref_count;
1717 break;
1718 case BLACKLIST_ON:
1719 ref_count++;
Mohit Khanna012bfe32017-01-19 21:15:35 -08001720 rc = 0;
1721 if (ref_count == 1) {
Mohit Khanna4a76dde2017-03-10 11:48:12 -08001722 rc = hif_napi_core_ctl_set_boost(true);
Mohit Khanna012bfe32017-01-19 21:15:35 -08001723 NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1724 rc, ref_count);
1725 hif_napi_bl_irq(napid, true);
1726 }
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001727 break;
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001728 case BLACKLIST_OFF:
Mohit Khanna5d6386f2017-04-17 16:24:05 -07001729 if (ref_count) {
Mohit Khanna012bfe32017-01-19 21:15:35 -08001730 ref_count--;
Mohit Khanna5d6386f2017-04-17 16:24:05 -07001731 rc = 0;
1732 if (ref_count == 0) {
1733 rc = hif_napi_core_ctl_set_boost(false);
1734 NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1735 rc, ref_count);
1736 hif_napi_bl_irq(napid, false);
1737 }
Orhan K AKYILDIZ5bfbc5b2016-10-26 19:39:44 -07001738 }
1739 break;
1740 default:
1741 NAPI_DEBUG("Invalid blacklist op: %d", op);
1742 rc = -EINVAL;
1743 } /* switch */
1744out:
Orhan K AKYILDIZ7ce54e72016-05-16 12:36:32 -07001745 NAPI_DEBUG("<--%s[%d]", __func__, rc);
1746 return rc;
1747}
1748
Orhan K AKYILDIZ458fefc2016-09-16 00:43:38 -07001749/**
1750 * hif_napi_serialize() - [de-]serialize NAPI operations
1751 * @hif: context
1752 * @is_on: 1: serialize, 0: deserialize
1753 *
1754 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1755 * following steps (see hif_napi_event for code):
1756 * - put irqs of all NAPI instances on the same CPU
1757 * - only for the first serialize call: blacklist
1758 *
1759 * hif_napi_serialize(hif, 0):
1760 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1761 * - at the end of the timer, check the current throughput state and
1762 * implement it.
1763 */
1764static unsigned long napi_serialize_reqs;
1765int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1766{
1767 int rc = -EINVAL;
1768
1769 if (hif != NULL)
1770 switch (is_on) {
1771 case 0: { /* de-serialize */
1772 rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1773 (void *) 0);
1774 napi_serialize_reqs = 0;
1775 break;
1776 } /* end de-serialize */
1777 case 1: { /* serialize */
1778 rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1779 (void *)napi_serialize_reqs++);
1780 break;
1781 } /* end serialize */
1782 default:
1783 break; /* no-op */
1784 } /* switch */
1785 return rc;
1786}
1787
Houston Hoffmanb3497c02017-04-22 18:27:00 -07001788#endif /* ifdef HIF_IRQ_AFFINITY */