/*
2 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/**
20 * DOC: hif_napi.c
21 *
22 * HIF NAPI interface implementation
23 */
24
25#include <string.h> /* memset */
26
27/* Linux headers */
28#include <linux/cpumask.h>
29#include <linux/cpufreq.h>
30#include <linux/cpu.h>
31#include <linux/topology.h>
32#include <linux/interrupt.h>
33#include <linux/irq.h>
34#ifdef HELIUMPLUS
35#ifdef CONFIG_SCHED_CORE_CTL
36#include <linux/sched/core_ctl.h>
37#endif
38#include <pld_snoc.h>
39#endif
40#include <linux/pm.h>
41
42/* Driver headers */
43#include <hif_napi.h>
44#include <hif_debug.h>
45#include <hif_io32.h>
46#include <ce_api.h>
47#include <ce_internal.h>
48
49enum napi_decision_vector {
50 HIF_NAPI_NOEVENT = 0,
51 HIF_NAPI_INITED = 1,
52 HIF_NAPI_CONF_UP = 2
53};
54#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
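/*
 * NAPI instances are effectively enabled only when both bits of the decision
 * vector are set: structures created (HIF_NAPI_INITED) and configuration
 * switched on (HIF_NAPI_CONF_UP); hif_napi_event() compares the state against
 * ENABLE_NAPI_MASK before enabling/disabling the instances.
 */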
55
56#ifdef HELIUMPLUS
57static inline int hif_get_irq_for_ce(int ce_id)
58{
59 return pld_snoc_get_irq(ce_id);
60}
61#else /* HELIUMPLUS */
62static inline int hif_get_irq_for_ce(int ce_id)
63{
64 return -EINVAL;
65}
66static int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu,
67 int action)
68{
69 return 0;
70}
71
72int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
73 enum qca_blacklist_op op)
74{
75 return 0;
76}
77#endif /* HELIUMPLUS */
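/*
 * Note: on non-HELIUMPLUS builds the helpers above are stubs, so per-CE IRQ
 * lookup, NAPI CPU migration and IRQ blacklisting are effectively no-ops.
 */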
78
79/**
 * hif_napi_create() - creates the NAPI structures for the rx-data CEs
 * @hif_ctx: pointer to hif context
 * @poll   : poll function to be used for the NAPI instances
 * @budget : budget to be registered with each NAPI instance
 * @scale  : scale factor on the weight (to scale the budget to 1000)
 * @flags  : feature flags
87 *
88 * Description:
89 * Creates NAPI instances. This function is called
90 * unconditionally during initialization. It creates
91 * napi structures through the proper HTC/HIF calls.
92 * The structures are disabled on creation.
93 * Note that for each NAPI instance a separate dummy netdev is used
94 *
95 * Return:
96 * < 0: error
97 * = 0: <should never happen>
98 * > 0: id of the created object (for multi-NAPI, number of objects created)
99 */
100int hif_napi_create(struct hif_opaque_softc *hif_ctx,
101 int (*poll)(struct napi_struct *, int),
102 int budget,
103 int scale,
104 uint8_t flags)
105{
106 int i;
107 struct qca_napi_data *napid;
108 struct qca_napi_info *napii;
109 struct CE_state *ce_state;
110 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
111 int rc = 0;
112
113 NAPI_DEBUG("-->(budget=%d, scale=%d)",
114 budget, scale);
115 NAPI_DEBUG("hif->napi_data.state = 0x%08x",
116 hif->napi_data.state);
117 NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
118 hif->napi_data.ce_map);
119
120 napid = &(hif->napi_data);
121 if (0 == (napid->state & HIF_NAPI_INITED)) {
122 memset(napid, 0, sizeof(struct qca_napi_data));
123 qdf_spinlock_create(&(napid->lock));
124
125 napid->state |= HIF_NAPI_INITED;
126 napid->flags = flags;
127
128 rc = hif_napi_cpu_init(hif_ctx);
129 if (rc != 0) {
			HIF_ERROR("NAPI initialization failed. %d", rc);
131 rc = napid->ce_map;
132 goto hnc_err;
133 }
134
135 HIF_DBG("%s: NAPI structures initialized, rc=%d",
136 __func__, rc);
137 }
138 for (i = 0; i < hif->ce_count; i++) {
139 ce_state = hif->ce_id_to_state[i];
140 NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
141 i, ce_state->htt_rx_data,
142 ce_state->htt_tx_data);
143 if (!ce_state->htt_rx_data)
144 continue;
145
146 /* Now this is a CE where we need NAPI on */
147 NAPI_DEBUG("Creating NAPI on pipe %d", i);
148 napii = qdf_mem_malloc(sizeof(*napii));
149 napid->napis[i] = napii;
150 if (!napii) {
151 NAPI_DEBUG("NAPI alloc failure %d", i);
152 rc = -ENOMEM;
153 goto napii_alloc_failure;
154 }
155 }
156
157 for (i = 0; i < hif->ce_count; i++) {
158 napii = napid->napis[i];
159 if (!napii)
160 continue;
161
162 NAPI_DEBUG("initializing NAPI for pipe %d", i);
163 memset(napii, 0, sizeof(struct qca_napi_info));
164 napii->scale = scale;
165 napii->id = NAPI_PIPE2ID(i);
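		/* NAPI ids and CE pipe ids map onto each other via the
		 * NAPI_PIPE2ID()/NAPI_ID2PIPE() helpers used throughout
		 * this file.
		 */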
166 napii->hif_ctx = hif_ctx;
167 napii->irq = hif_get_irq_for_ce(i);
168
169 if (napii->irq < 0)
170 HIF_WARN("%s: bad IRQ value for CE %d: %d",
171 __func__, i, napii->irq);
172
173 qdf_spinlock_create(&napii->lro_unloading_lock);
174 init_dummy_netdev(&(napii->netdev));
175
176 NAPI_DEBUG("adding napi=%p to netdev=%p (poll=%p, bdgt=%d)",
177 &(napii->napi), &(napii->netdev), poll, budget);
178 netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
179
180 NAPI_DEBUG("after napi_add");
181 NAPI_DEBUG("napi=0x%p, netdev=0x%p",
182 &(napii->napi), &(napii->netdev));
183 NAPI_DEBUG("napi.dev_list.prev=0x%p, next=0x%p",
184 napii->napi.dev_list.prev,
185 napii->napi.dev_list.next);
186 NAPI_DEBUG("dev.napi_list.prev=0x%p, next=0x%p",
187 napii->netdev.napi_list.prev,
188 napii->netdev.napi_list.next);
189
190 /* It is OK to change the state variable below without
191 * protection as there should be no-one around yet
192 */
193 napid->ce_map |= (0x01 << i);
194 HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
195 napii->id, i);
196 }
197 NAPI_DEBUG("napi map = %x", napid->ce_map);
198 NAPI_DEBUG("NAPI ids created for all applicable pipes");
199 return napid->ce_map;
200
201napii_alloc_failure:
202 for (i = 0; i < hif->ce_count; i++) {
203 napii = napid->napis[i];
204 napid->napis[i] = NULL;
205 if (napii)
206 qdf_mem_free(napii);
207 }
208
209hnc_err:
210 NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
211 return rc;
212}
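/*
 * Illustrative lifecycle (sketch only, not a definitive call sequence):
 * the init path calls hif_napi_create() once, the configuration path then
 * enables the instances, e.g. hif_napi_event(hif, NAPI_EVT_INI_FILE,
 * (void *)1), the kernel drives hif_napi_poll() through each instance's
 * dummy netdev, and cleanup calls hif_napi_destroy() for each created id.
 */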
213
/**
 * hif_napi_destroy() - destroys the NAPI structures for a given instance
 * @hif_ctx: pointer to hif context
 * @id     : the NAPI id whose instance will be destroyed
 * @force  : if set, will destroy even if entry is active (de-activates)
 *
 * Description:
 * Destroy a given NAPI instance. This function is called
 * unconditionally during cleanup.
 * Refuses to destroy an entry if it is still enabled (unless force=1).
 * Marks the whole napi_data invalid if all instances are destroyed.
 *
 * Return:
 * -EINVAL: specific entry has not been created
 * -EPERM : specific entry is still active
 * < 0    : error
 * = 0    : success
 */
233int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
234 uint8_t id,
235 int force)
236{
237 uint8_t ce = NAPI_ID2PIPE(id);
238 int rc = 0;
239 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
240
241 NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
242
243 if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
244 HIF_ERROR("%s: NAPI not initialized or entry %d not created",
245 __func__, id);
246 rc = -EINVAL;
247 } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
248 HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
249 __func__, id, ce);
250 if (hif->napi_data.napis[ce])
251 HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
252 __func__, id, ce);
253 rc = -EINVAL;
254 } else {
255 struct qca_napi_data *napid;
256 struct qca_napi_info *napii;
257
258 napid = &(hif->napi_data);
259 napii = napid->napis[ce];
260 if (!napii) {
261 if (napid->ce_map & (0x01 << ce))
262 HIF_ERROR("%s: napii & ce_map out of sync(ce %d)",
263 __func__, ce);
264 return -EINVAL;
265 }
266
267
268 if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
269 if (force) {
270 napi_disable(&(napii->napi));
271 HIF_DBG("%s: NAPI entry %d force disabled",
272 __func__, id);
273 NAPI_DEBUG("NAPI %d force disabled", id);
274 } else {
275 HIF_ERROR("%s: Cannot destroy active NAPI %d",
276 __func__, id);
277 rc = -EPERM;
278 }
279 }
280 if (0 == rc) {
281 NAPI_DEBUG("before napi_del");
282 NAPI_DEBUG("napi.dlist.prv=0x%p, next=0x%p",
283 napii->napi.dev_list.prev,
284 napii->napi.dev_list.next);
285 NAPI_DEBUG("dev.napi_l.prv=0x%p, next=0x%p",
286 napii->netdev.napi_list.prev,
287 napii->netdev.napi_list.next);
288
289 qdf_spinlock_destroy(&napii->lro_unloading_lock);
290 netif_napi_del(&(napii->napi));
291
292 napid->ce_map &= ~(0x01 << ce);
293 napid->napis[ce] = NULL;
294 napii->scale = 0;
295 qdf_mem_free(napii);
296 HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
297
			/* if all instances have been destroyed,
			 * set the whole structure to the uninitialized state
			 */
302 if (napid->ce_map == 0) {
303 rc = hif_napi_cpu_deinit(hif_ctx);
304 /* caller is tolerant to receiving !=0 rc */
305
306 qdf_spinlock_destroy(&(napid->lock));
307 memset(napid,
308 0, sizeof(struct qca_napi_data));
309 HIF_DBG("%s: no NAPI instances. Zapped.",
310 __func__);
311 }
312 }
313 }
314
315 return rc;
316}
317
318/**
319 * hif_napi_lro_flush_cb_register() - init and register flush callback for LRO
320 * @hif_hdl: pointer to hif context
 * @lro_flush_handler: LRO flush callback to be registered
 * @lro_init_handler: callback to initialize per-CE LRO state
 *
 * Return: number of CEs for which the callback was registered (0 on failure)
325 */
326int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
327 void (lro_flush_handler)(void *),
328 void *(lro_init_handler)(void))
329{
330 int rc = 0;
331 int i;
332 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
333 void *data = NULL;
334 struct qca_napi_data *napid;
335 struct qca_napi_info *napii;
336
337 QDF_ASSERT(scn != NULL);
338
339 napid = hif_napi_get_all(hif_hdl);
340 if (scn != NULL) {
341 for (i = 0; i < scn->ce_count; i++) {
342 napii = napid->napis[i];
343 if (napii) {
344 data = lro_init_handler();
345 if (data == NULL) {
346 HIF_ERROR("%s: Failed to init LRO for CE %d",
347 __func__, i);
348 continue;
349 }
350 napii->lro_flush_cb = lro_flush_handler;
351 napii->lro_ctx = data;
352 HIF_DBG("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
353 i, napii->id, napii->lro_flush_cb,
354 napii->lro_ctx);
355 rc++;
356 }
357 }
358 } else {
359 HIF_ERROR("%s: hif_state NULL!", __func__);
360 }
361 return rc;
362}
363
364/**
 * hif_napi_lro_flush_cb_deregister() - deregister and free LRO
 * @hif_hdl: pointer to hif context
367 * @lro_deinit_cb: LRO deinit callback
368 *
369 * Return: NONE
370 */
371void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
372 void (lro_deinit_cb)(void *))
373{
374 int i;
375 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
376 struct qca_napi_data *napid;
377 struct qca_napi_info *napii;
378
379 QDF_ASSERT(scn != NULL);
380
381 napid = hif_napi_get_all(hif_hdl);
382 if (scn != NULL) {
383 for (i = 0; i < scn->ce_count; i++) {
384 napii = napid->napis[i];
385 if (napii) {
386 HIF_DBG("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
387 i, napii->id, napii->lro_flush_cb,
388 napii->lro_ctx);
389 qdf_spin_lock_bh(&napii->lro_unloading_lock);
390 napii->lro_flush_cb = NULL;
391 lro_deinit_cb(napii->lro_ctx);
392 napii->lro_ctx = NULL;
393 qdf_spin_unlock_bh(
394 &napii->lro_unloading_lock);
395 }
396 }
397 } else {
398 HIF_ERROR("%s: hif_state NULL!", __func__);
399 }
400}
401
402/**
 * hif_napi_get_lro_info() - returns the address of the LRO data for napi_id
 * @hif_hdl: pointer to hif context
 * @napi_id: napi instance
406 *
407 * Description:
408 * Returns the address of the LRO structure
409 *
410 * Return:
411 * <addr>: address of the LRO structure
412 */
413void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
414{
415 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
416 struct qca_napi_data *napid;
417 struct qca_napi_info *napii;
418
419 napid = &(scn->napi_data);
420 napii = napid->napis[NAPI_ID2PIPE(napi_id)];
421
422 if (napii)
423 return napii->lro_ctx;
	return NULL;
425}
426
427/**
428 *
429 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
430 * @hif: pointer to hif context
431 *
432 * Description:
433 * Returns the address of the whole structure
434 *
435 * Return:
436 * <addr>: address of the whole HIF NAPI structure
437 */
438inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
439{
440 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
441
442 return &(hif->napi_data);
443}
444
445/**
446 *
447 * hif_napi_event() - reacts to events that impact NAPI
 * @hif_ctx: pointer to hif context
 * @event  : event that has been detected
450 * @data: more data regarding the event
451 *
452 * Description:
453 * This function handles two types of events:
454 * 1- Events that change the state of NAPI (enabled/disabled):
455 * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
456 * The state is retrievable by "hdd_napi_enabled(-1)"
457 * - NAPI will be on if either INI file is on and it has not been disabled
458 * by a subsequent vendor CMD,
459 * or it has been enabled by a vendor CMD.
460 * 2- Events that change the CPU affinity of a NAPI instance/IRQ:
461 * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
462 * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
463 * - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 * - In LO tput mode, NAPI will yield control of its interrupts to the system
 *   management functions. However, in HI throughput mode, NAPI will actively
 *   manage its interrupts/instances (by trying to disperse them out to
 *   separate performance cores).
 * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 * + In some cases (roaming peer management is the only case so far), a
 *   client can trigger a "SERIALIZE" event. Basically, this means that the
 *   user is asking NAPI to go into a truly single execution context state.
 *   So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted
 *   (if called for the first time) and then moves all IRQs (for NAPI
 *   instances) to be collapsed to a single core. If called multiple times,
 *   it will just re-collapse the CPUs: this is safe because the
 *   blacklist-on() API is reference-counted.
 *
 *   Such a user should send a "DESERIALIZE" (NORMAL) event to return NAPI
 *   to its "normal" operation. Optionally, they can give a timeout value (in
 *   multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
 *   case, NAPI will just set the current throughput state to uninitialized
 *   and set the delay period. Once the policy handler is called, it skips
 *   applying the policy for "delay period" invocations, and applies it
 *   thereafter.
485 *
486 * Return:
487 * < 0: some error
488 * = 0: event handled successfully
489 */
490int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
491 void *data)
492{
493 int rc = 0;
494 uint32_t prev_state;
495 int i;
496 struct napi_struct *napi;
497 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
498 struct qca_napi_data *napid = &(hif->napi_data);
499 enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
500 enum {
501 BLACKLIST_NOT_PENDING,
502 BLACKLIST_ON_PENDING,
503 BLACKLIST_OFF_PENDING
504 } blacklist_pending = BLACKLIST_NOT_PENDING;
505
506 NAPI_DEBUG("%s: -->(event=%d, aux=%p)", __func__, event, data);
507
508 if ((napid->state & HIF_NAPI_INITED) == 0) {
509 NAPI_DEBUG("%s: got event when NAPI not initialized",
510 __func__);
511 return -EINVAL;
512 }
513 qdf_spin_lock_bh(&(napid->lock));
514 prev_state = napid->state;
515 switch (event) {
516 case NAPI_EVT_INI_FILE:
517 case NAPI_EVT_CMD_STATE:
518 case NAPI_EVT_INT_STATE: {
519 int on = (data != ((void *)0));
520
521 HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
522 __func__, event,
523 on, prev_state);
524 if (on)
525 if (prev_state & HIF_NAPI_CONF_UP) {
526 HIF_DBG("%s: duplicate NAPI conf ON msg",
527 __func__);
528 } else {
529 HIF_DBG("%s: setting state to ON",
530 __func__);
531 napid->state |= HIF_NAPI_CONF_UP;
532 }
533 else /* off request */
534 if (prev_state & HIF_NAPI_CONF_UP) {
535 HIF_DBG("%s: setting state to OFF",
536 __func__);
537 napid->state &= ~HIF_NAPI_CONF_UP;
538 } else {
539 HIF_DBG("%s: duplicate NAPI conf OFF msg",
540 __func__);
541 }
542 break;
543 }
	/* case NAPI_EVT_INI_FILE/CMD_STATE/INT_STATE */
545
546 case NAPI_EVT_CPU_STATE: {
547 int cpu = ((unsigned long int)data >> 16);
548 int val = ((unsigned long int)data & 0x0ff);
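		/*
		 * data packs the CPU number in the upper bits and the new CPU
		 * state in the low byte, mirroring the encoding used by
		 * hnc_cpu_notify_cb(): ((cpu << 16) | state).
		 */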
549
550 NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
551 __func__, cpu, val);
552
553 /* state has already been set by hnc_cpu_notify_cb */
554 if ((val == QCA_NAPI_CPU_DOWN) &&
555 (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
556 (napid->napi_cpu[cpu].napis != 0)) {
557 NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
558 __func__, cpu);
559 rc = hif_napi_cpu_migrate(napid,
560 cpu,
561 HNC_ACT_RELOCATE);
562 napid->napi_cpu[cpu].napis = 0;
563 }
564 /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
565 break;
566 }
567
568 case NAPI_EVT_TPUT_STATE: {
569 tput_mode = (enum qca_napi_tput_state)data;
570 if (tput_mode == QCA_NAPI_TPUT_LO) {
571 /* from TPUT_HI -> TPUT_LO */
572 NAPI_DEBUG("%s: Moving to napi_tput_LO state",
573 __func__);
574 blacklist_pending = BLACKLIST_OFF_PENDING;
575 /*
576 * Ideally we should "collapse" interrupts here, since
577 * we are "dispersing" interrupts in the "else" case.
578 * This allows the possibility that our interrupts may
579 * still be on the perf cluster the next time we enter
580 * high tput mode. However, the irq_balancer is free
581 * to move our interrupts to power cluster once
582 * blacklisting has been turned off in the "else" case.
583 */
584 } else {
			/* from TPUT_LO -> TPUT_HI */
586 NAPI_DEBUG("%s: Moving to napi_tput_HI state",
587 __func__);
588 rc = hif_napi_cpu_migrate(napid,
589 HNC_ANY_CPU,
590 HNC_ACT_DISPERSE);
591
592 blacklist_pending = BLACKLIST_ON_PENDING;
593 }
594 napid->napi_mode = tput_mode;
595 break;
596 }
597
598 case NAPI_EVT_USR_SERIAL: {
599 unsigned long users = (unsigned long)data;
600
601 NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
602 __func__, users);
603
604 rc = hif_napi_cpu_migrate(napid,
605 HNC_ANY_CPU,
606 HNC_ACT_COLLAPSE);
607 if ((users == 0) && (rc == 0))
608 blacklist_pending = BLACKLIST_ON_PENDING;
609 break;
610 }
611 case NAPI_EVT_USR_NORMAL: {
612 NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
613 /*
614 * Deserialization timeout is handled at hdd layer;
615 * just mark current mode to uninitialized to ensure
616 * it will be set when the delay is over
617 */
618 napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
619 break;
620 }
621 default: {
622 HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
623 __func__, event, (unsigned long) data);
624 break;
625 } /* default */
626 }; /* switch */
627
628
629 switch (blacklist_pending) {
630 case BLACKLIST_ON_PENDING:
631 /* assume the control of WLAN IRQs */
632 hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
633 break;
634 case BLACKLIST_OFF_PENDING:
635 /* yield the control of WLAN IRQs */
636 hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
637 break;
638 default: /* nothing to do */
639 break;
640 } /* switch blacklist_pending */
641
642 qdf_spin_unlock_bh(&(napid->lock));
643
644 if (prev_state != napid->state) {
645 if (napid->state == ENABLE_NAPI_MASK) {
646 rc = 1;
647 for (i = 0; i < CE_COUNT_MAX; i++) {
648 struct qca_napi_info *napii = napid->napis[i];
649 if (napii) {
650 napi = &(napii->napi);
651 NAPI_DEBUG("%s: enabling NAPI %d",
652 __func__, i);
653 napi_enable(napi);
654 }
655 }
656 } else {
657 rc = 0;
658 for (i = 0; i < CE_COUNT_MAX; i++) {
659 struct qca_napi_info *napii = napid->napis[i];
660 if (napii) {
661 napi = &(napii->napi);
662 NAPI_DEBUG("%s: disabling NAPI %d",
663 __func__, i);
664 napi_disable(napi);
665 /* in case it is affined, remove it */
666 irq_set_affinity_hint(napii->irq, NULL);
667 }
668 }
669 }
670 } else {
671 HIF_DBG("%s: no change in hif napi state (still %d)",
672 __func__, prev_state);
673 }
674
675 NAPI_DEBUG("<--[rc=%d]", rc);
676 return rc;
677}
678
679/**
680 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
681 * @hif: hif context
682 * @ce : CE instance (or -1, to check if any CEs are enabled)
683 *
684 * Return: bool
685 */
686int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
687{
688 int rc;
689 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
690
691 if (-1 == ce)
692 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
693 else
694 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
695 (hif->napi_data.ce_map & (0x01 << ce)));
696 return rc;
697};
698
699/**
700 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
701 *
702 * @hif: hif context
703 * @id : id of NAPI instance calling this (used to determine the CE)
704 *
705 * Return: void
706 */
707inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
708{
709 struct hif_softc *scn = HIF_GET_SOFTC(hif);
710
711 hif_irq_enable(scn, NAPI_ID2PIPE(id));
712}
713
714
715/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx: hif context
 * @ce_id: index of napi instance
 *
 * Return: true if the NAPI instance was scheduled, false otherwise
721 */
722int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
723{
724 int cpu = smp_processor_id();
725 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
726 struct qca_napi_info *napii;
727
728 hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
729 NULL, NULL, 0);
730
731 napii = scn->napi_data.napis[ce_id];
732 if (qdf_unlikely(!napii)) {
733 HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
734 __func__, ce_id);
735 qdf_atomic_dec(&scn->active_tasklet_cnt);
736 return false;
737 }
738
739 napii->stats[cpu].napi_schedules++;
740 NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
741 napi_schedule(&(napii->napi));
742
743 return true;
744}
745
746/**
747 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
748 * @napi_info: pointer to qca_napi_info for the napi instance
749 *
750 * Return: true => interrupt already on correct cpu, no correction needed
751 * false => interrupt on wrong cpu, correction done for cpu affinity
752 * of the interrupt
753 */
754static inline
755bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
756{
757 bool right_cpu = true;
758 int rc = 0;
759 cpumask_t cpumask;
760 int cpu;
761 struct qca_napi_data *napid;
762
763 napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
764
765 if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
766
767 cpu = qdf_get_cpu();
768 if (unlikely((hif_napi_cpu_blacklist(napid,
769 BLACKLIST_QUERY) > 0) &&
770 (cpu != napi_info->cpu))) {
771 right_cpu = false;
772
773 NAPI_DEBUG("interrupt on wrong CPU, correcting");
774 cpumask.bits[0] = (0x01 << napi_info->cpu);
775
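			/*
			 * Toggle IRQ_NO_BALANCING off around the affinity
			 * hint update, then restore it so that the irq
			 * balancer keeps its hands off this interrupt.
			 */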
776 irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
777 rc = irq_set_affinity_hint(napi_info->irq,
778 &cpumask);
779 irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
780
781 if (rc)
782 HIF_ERROR("error setting irq affinity hint: %d",
783 rc);
784 else
785 napi_info->stats[cpu].cpu_corrected++;
786 }
787 }
788 return right_cpu;
789}
790
791/**
 * hif_napi_poll() - NAPI poll routine
 * @hif_ctx: hif context
 * @napi  : pointer to NAPI struct as kernel holds it
 * @budget: maximum work this poll is allowed to do (standard NAPI budget)
795 *
796 * This is the body of the poll function.
797 * The poll function is called by kernel. So, there is a wrapper
798 * function in HDD, which in turn calls this function.
799 * Two main reasons why the whole thing is not implemented in HDD:
800 * a) references to things like ce_service that HDD is not aware of
801 * b) proximity to the implementation of ce_tasklet, which the body
802 * of this function should be very close to.
803 *
804 * NOTE TO THE MAINTAINER:
805 * Consider this function and ce_tasklet very tightly coupled pairs.
806 * Any changes to ce_tasklet or this function may likely need to be
807 * reflected in the counterpart.
808 *
809 * Returns:
810 * int: the amount of work done in this poll (<= budget)
811 */
812int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
813 struct napi_struct *napi,
814 int budget)
815{
816 int rc = 0; /* default: no work done, also takes care of error */
817 int normalized = 0;
818 int bucket;
819 int cpu = smp_processor_id();
820 bool poll_on_right_cpu;
821 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
822 struct qca_napi_info *napi_info;
823 struct CE_state *ce_state = NULL;
824
825 if (unlikely(NULL == hif)) {
826 HIF_ERROR("%s: hif context is NULL", __func__);
827 QDF_ASSERT(0);
828 goto out;
829 }
830
831 napi_info = (struct qca_napi_info *)
832 container_of(napi, struct qca_napi_info, napi);
833
834 NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
835 __func__, napi_info->id, napi_info->irq, budget);
836
837 napi_info->stats[cpu].napi_polls++;
838
839 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
840 NAPI_POLL_ENTER, NULL, NULL, cpu);
841
842 qdf_spin_lock_bh(&napi_info->lro_unloading_lock);
843
844 rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
845 NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
846 __func__, rc);
847
848 if (napi_info->lro_flush_cb)
849 napi_info->lro_flush_cb(napi_info->lro_ctx);
850 qdf_spin_unlock_bh(&napi_info->lro_unloading_lock);
851
852 /* do not return 0, if there was some work done,
853 * even if it is below the scale
854 */
855 if (rc) {
856 napi_info->stats[cpu].napi_workdone += rc;
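		/*
		 * Normalize the work done by the per-instance scale factor
		 * and account it in a small histogram of budget-utilization
		 * buckets (napi_budget_uses) for this CPU.
		 */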
857 normalized = (rc / napi_info->scale);
858 if (normalized == 0)
859 normalized++;
860 bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
861 if (bucket >= QCA_NAPI_NUM_BUCKETS) {
862 bucket = QCA_NAPI_NUM_BUCKETS - 1;
863 HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
864 bucket, QCA_NAPI_NUM_BUCKETS);
865 }
866 napi_info->stats[cpu].napi_budget_uses[bucket]++;
867 } else {
868 /* if ce_per engine reports 0, then poll should be terminated */
869 NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
870 __func__, __LINE__);
871 }
872
873 ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
874
875 /*
	 * Not using the API hif_napi_correct_cpu directly in the if statement
	 * below, since the call might not get evaluated (due to short-circuit
	 * evaluation) if an earlier condition is true. The CPU correction
	 * check should kick in on every poll.
880 */
881#ifdef NAPI_YIELD_BUDGET_BASED
882 if (ce_state && (ce_state->force_break || 0 == rc)) {
883#else
884 poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
885 if ((ce_state) &&
886 (!ce_check_rx_pending(ce_state) || (0 == rc) ||
887 !poll_on_right_cpu)) {
888#endif
889 napi_info->stats[cpu].napi_completes++;
890#ifdef NAPI_YIELD_BUDGET_BASED
891 ce_state->force_break = 0;
892#endif
893
894 hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
895 NULL, NULL, 0);
896 if (normalized >= budget)
897 normalized = budget - 1;
898
899 /* enable interrupts */
900 napi_complete(napi);
901 hif_napi_enable_irq(hif_ctx, napi_info->id);
902 /* support suspend/resume */
903 qdf_atomic_dec(&(hif->active_tasklet_cnt));
904
905 NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
906 __func__, __LINE__);
907 } else {
908 /* 4.4 kernel NAPI implementation requires drivers to
909 * return full work when they ask to be re-scheduled,
910 * or napi_complete and re-start with a fresh interrupt
911 */
912 normalized = budget;
913 }
914
915 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
916 NAPI_POLL_EXIT, NULL, NULL, normalized);
917
918 NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
919 return normalized;
920out:
921 return rc;
922}
923
924#ifdef HELIUMPLUS
925/**
 * hif_napi_update_yield_stats() - update NAPI yield related stats
 * @ce_state: CE state whose yield stats need to be updated
 * @time_limit_reached: indicates whether the time limit was reached
 * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
932 *
933 * Return: None
934 */
935void hif_napi_update_yield_stats(struct CE_state *ce_state,
936 bool time_limit_reached,
937 bool rxpkt_thresh_reached)
938{
939 struct hif_softc *hif;
940 struct qca_napi_data *napi_data = NULL;
941 int ce_id = 0;
942 int cpu_id = 0;
943
944 if (unlikely(NULL == ce_state)) {
945 QDF_ASSERT(NULL != ce_state);
946 return;
947 }
948
949 hif = ce_state->scn;
950
951 if (unlikely(NULL == hif)) {
952 QDF_ASSERT(NULL != hif);
953 return;
954 }
955 napi_data = &(hif->napi_data);
956 if (unlikely(NULL == napi_data)) {
957 QDF_ASSERT(NULL != napi_data);
958 return;
959 }
960
	ce_id = ce_state->id;
	cpu_id = qdf_get_cpu();

	/* check the per-CE entry only after ce_id has been read from ce_state */
	if (unlikely(NULL == napi_data->napis[ce_id]))
		return;
966
967 if (time_limit_reached)
968 napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
969 else
970 napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
971}
972
973/**
974 *
975 * hif_napi_stats() - display NAPI CPU statistics
976 * @napid: pointer to qca_napi_data
977 *
978 * Description:
 * Prints the CPU cores on which the NAPI instances' (CEs') interrupts
 * are being executed. Can be called from outside the NAPI layer.
981 *
982 * Return: None
983 */
984void hif_napi_stats(struct qca_napi_data *napid)
985{
986 int i;
987 struct qca_napi_cpu *cpu;
988
989 if (napid == NULL) {
		qdf_print("%s: napi data struct is null", __func__);
991 return;
992 }
993
994 cpu = napid->napi_cpu;
995 qdf_print("NAPI CPU TABLE");
996 qdf_print("lilclhead=%d, bigclhead=%d",
997 napid->lilcl_head, napid->bigcl_head);
998 for (i = 0; i < NR_CPUS; i++) {
999 qdf_print("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
1000 i,
1001 cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1002 cpu[i].core_mask.bits[0],
1003 cpu[i].thread_mask.bits[0],
1004 cpu[i].max_freq, cpu[i].napis,
1005 cpu[i].cluster_nxt);
1006 }
1007}
1008
1009#ifdef FEATURE_NAPI_DEBUG
1010/*
1011 * Local functions
1012 * - no argument checks, all internal/trusted callers
1013 */
1014static void hnc_dump_cpus(struct qca_napi_data *napid)
1015{
1016 hif_napi_stats(napid);
1017}
1018#else
1019static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
1020#endif /* FEATURE_NAPI_DEBUG */
1021/**
 * hnc_link_clusters() - partitions the cpu table into clusters
 * @napid: pointer to NAPI data
 *
 * Takes in a CPU topology table and builds two linked lists
 * (big cluster cores, list-head at bigcl_head, and little cluster
 * cores, list-head at lilcl_head) out of it.
 *
 * If there is more than one cluster:
 * - bigcl_head and lilcl_head will be different,
 * - the cluster with the highest cpufreq will be considered the "big" cluster.
 *   If more than one cluster shares the highest frequency, the *last* such
 *   cluster will be designated as the "big" cluster,
 * - the cluster with the lowest cpufreq will be considered the "li'l" cluster.
 *   If more than one cluster shares the lowest cpufreq, the *first* such
 *   cluster will be designated as the "little" cluster,
 * - we only support up to 32 clusters.
1038 * Return: 0 : OK
1039 * !0: error (at least one of lil/big clusters could not be found)
1040 */
1041#define HNC_MIN_CLUSTER 0
1042#define HNC_MAX_CLUSTER 31
1043static int hnc_link_clusters(struct qca_napi_data *napid)
1044{
1045 int rc = 0;
1046
1047 int i;
1048 int it = 0;
1049 uint32_t cl_done = 0x0;
1050 int cl, curcl, curclhead = 0;
1051 int more;
1052 unsigned int lilfrq = INT_MAX;
1053 unsigned int bigfrq = 0;
1054 unsigned int clfrq = 0;
1055 int prev = 0;
1056 struct qca_napi_cpu *cpus = napid->napi_cpu;
1057
1058 napid->lilcl_head = napid->bigcl_head = -1;
1059
1060 do {
1061 more = 0;
1062 it++; curcl = -1;
1063 for (i = 0; i < NR_CPUS; i++) {
1064 cl = cpus[i].cluster_id;
1065 NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1066 i, cl);
1067 if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1068 NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1069 QDF_ASSERT(0);
1070 /* continue if ASSERTs are disabled */
1071 continue;
1072 };
1073 if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1074 NAPI_DEBUG("Core mask 0. SKIPPED\n");
1075 continue;
1076 }
1077 if (cl_done & (0x01 << cl)) {
1078 NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1079 continue;
1080 } else {
1081 if (more == 0) {
1082 more = 1;
1083 curcl = cl;
1084 curclhead = i; /* row */
1085 clfrq = cpus[i].max_freq;
1086 prev = -1;
1087 };
1088 if ((curcl >= 0) && (curcl != cl)) {
1089 NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1090 cl, curcl);
1091 continue;
1092 }
1093 if (cpus[i].max_freq != clfrq)
1094 NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1095 cpus[i].max_freq, clfrq);
1096 if (clfrq >= bigfrq) {
1097 bigfrq = clfrq;
1098 napid->bigcl_head = curclhead;
1099 NAPI_DEBUG("bigcl=%d\n", curclhead);
1100 }
1101 if (clfrq < lilfrq) {
1102 lilfrq = clfrq;
1103 napid->lilcl_head = curclhead;
1104 NAPI_DEBUG("lilcl=%d\n", curclhead);
1105 }
1106 if (prev != -1)
1107 cpus[prev].cluster_nxt = i;
1108
1109 prev = i;
1110 }
1111 }
1112 if (curcl >= 0)
1113 cl_done |= (0x01 << curcl);
1114
1115 } while (more);
1116
1117 if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1118 rc = -EFAULT;
1119
1120 hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1121 return rc;
1122}
1123#undef HNC_MIN_CLUSTER
1124#undef HNC_MAX_CLUSTER
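/*
 * Example (hypothetical 4+4 big.LITTLE system, cores 0-3 little, 4-7 big):
 * after hnc_link_clusters(), lilcl_head would be 0 with cluster_nxt links
 * 0->1->2->3, and bigcl_head would be 4 with links 4->5->6->7. This is a
 * sketch of the intended result, not output captured from a real target.
 */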
1125
1126/*
1127 * hotplug function group
1128 */
1129
1130/**
1131 * hnc_cpu_notify_cb() - handles CPU hotplug events
1132 *
 * On transitions to online, we only handle the ONLINE event,
 * and ignore the PREP events, because we don't want to act too
 * early.
 * On transitions to offline, we act on PREP events, because
 * we may need to move the irqs/NAPIs to another CPU before
 * it is actually off-lined.
 *
 * Return: NOTIFY_OK (don't block action)
1141 */
1142static int hnc_cpu_notify_cb(struct notifier_block *nb,
1143 unsigned long action,
1144 void *hcpu)
1145{
1146 int rc = NOTIFY_OK;
1147 unsigned long cpu = (unsigned long)hcpu;
1148 struct hif_opaque_softc *hif;
1149 struct qca_napi_data *napid = NULL;
1150
1151 NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);
1152
1153 napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier);
1154 hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
1155
1156 switch (action) {
1157 case CPU_ONLINE:
1158 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1159 NAPI_DEBUG("%s: CPU %ld marked %d",
1160 __func__, cpu, napid->napi_cpu[cpu].state);
1161 break;
1162 case CPU_DEAD: /* already dead; we have marked it before, but ... */
1163 case CPU_DEAD_FROZEN:
1164 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1165 NAPI_DEBUG("%s: CPU %ld marked %d",
1166 __func__, cpu, napid->napi_cpu[cpu].state);
1167 break;
1168 case CPU_DOWN_PREPARE:
1169 case CPU_DOWN_PREPARE_FROZEN:
1170 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1171
1172 NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
1173 __func__, cpu, napid->napi_cpu[cpu].state);
1174
		/*
		 * we need to move any NAPIs on this CPU out.
		 * if we are in LO throughput mode, then this only applies
		 * if the CPU is the designated low-throughput CPU.
		 */
1180 hif_napi_event(hif,
1181 NAPI_EVT_CPU_STATE,
1182 (void *)
1183 ((cpu << 16) | napid->napi_cpu[cpu].state));
1184 break;
1185 default:
1186 NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
1187 break;
1188 } /* switch */
1189 NAPI_DEBUG("<--%s [%d]", __func__, rc);
1190 return rc;
1191}
1192
1193/**
1194 * hnc_hotplug_hook() - installs a hotplug notifier
 * @hif_sc : hif_sc context
 * @install: !0 => register, =0 => deregister
1197 *
1198 * Because the callback relies on the data layout of
1199 * struct hif_softc & its napi_data member, this callback
1200 * registration requires that the hif_softc is passed in.
1201 *
1202 * Note that this is different from the cpu notifier used by
1203 * rx_thread (cds_schedule.c).
 * We may consider combining these notifiers in the future.
1205 *
1206 * Return: 0: success
1207 * <0: error
1208 */
1209static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install)
1210{
1211 int rc = 0;
1212
1213 NAPI_DEBUG("-->%s(%d)", __func__, install);
1214
1215 if (install) {
1216 hif_sc->napi_data.hnc_cpu_notifier.notifier_call
1217 = hnc_cpu_notify_cb;
1218 rc = register_hotcpu_notifier(
1219 &hif_sc->napi_data.hnc_cpu_notifier);
1220 } else {
1221 unregister_hotcpu_notifier(
1222 &hif_sc->napi_data.hnc_cpu_notifier);
1223 }
1224
1225 NAPI_DEBUG("<--%s()[%d]", __func__, rc);
1226 return rc;
1227}
1228
1229/**
 * hnc_tput_hook() - installs a callback in the throughput detector
 * @install: !0 => register; =0 => unregister
1232 *
1233 * installs a callback to be called when wifi driver throughput (tx+rx)
1234 * crosses a threshold. Currently, we are using the same criteria as
1235 * TCP ack suppression (500 packets/100ms by default).
1236 *
1237 * Return: 0 : success
1238 * <0: failure
1239 */
1240
1241static int hnc_tput_hook(int install)
1242{
1243 int rc = 0;
1244
1245 /*
1246 * Nothing, until the bw_calculation accepts registration
1247 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
1248 * hdd_napi_throughput_policy(...)
1249 */
1250 return rc;
1251}
1252
1253/*
1254 * Implementation of hif_napi_cpu API
1255 */
1256
1257/**
1258 * hif_napi_cpu_init() - initialization of irq affinity block
 * @hif: opaque hif context
 *
 * called by hif_napi_create, after the first instance is created
 * - builds the napi_cpu table from the cpu topology
 * - links cores of the same cluster together
1264 * - installs hot-plug notifier
1265 * - installs throughput trigger notifier (when such mechanism exists)
1266 *
1267 * Return: 0: OK
1268 * <0: error code
1269 */
1270int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1271{
1272 int rc = 0;
1273 int i;
1274 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1275 struct qca_napi_cpu *cpus = napid->napi_cpu;
1276
1277 NAPI_DEBUG("--> ");
1278
1279 if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1280 NAPI_DEBUG("NAPI RSS table already initialized.\n");
1281 rc = -EALREADY;
1282 goto lab_rss_init;
1283 }
1284
1285 /* build CPU topology table */
1286 for_each_possible_cpu(i) {
1287 cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
1288 ? QCA_NAPI_CPU_UP
1289 : QCA_NAPI_CPU_DOWN));
1290 cpus[i].core_id = topology_core_id(i);
1291 cpus[i].cluster_id = topology_physical_package_id(i);
1292 cpumask_copy(&(cpus[i].core_mask),
1293 topology_core_cpumask(i));
1294 cpumask_copy(&(cpus[i].thread_mask),
1295 topology_sibling_cpumask(i));
1296 cpus[i].max_freq = cpufreq_quick_get_max(i);
1297 cpus[i].napis = 0x0;
1298 cpus[i].cluster_nxt = -1; /* invalid */
1299 }
1300
1301 /* link clusters together */
1302 rc = hnc_link_clusters(napid);
1303 if (0 != rc)
1304 goto lab_err_topology;
1305
1306 /* install hotplug notifier */
1307 rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1);
1308 if (0 != rc)
1309 goto lab_err_hotplug;
1310
1311 /* install throughput notifier */
1312 rc = hnc_tput_hook(1);
1313 if (0 == rc)
1314 goto lab_rss_init;
1315
1316lab_err_hotplug:
1317 hnc_tput_hook(0);
1318 hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1319lab_err_topology:
1320 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1321lab_rss_init:
1322 NAPI_DEBUG("<-- [rc=%d]", rc);
1323 return rc;
1324}
1325
1326/**
 * hif_napi_cpu_deinit() - clean-up of irq affinity block
 * @hif: opaque hif context
 *
1329 * called by hif_napi_destroy, when the last instance is removed
1330 * - uninstalls throughput and hotplug notifiers
1331 * - clears cpu topology table
1332 * Return: 0: OK
1333 */
1334int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1335{
1336 int rc = 0;
1337 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1338
1339 NAPI_DEBUG("-->%s(...)", __func__);
1340
1341 /* uninstall tput notifier */
1342 rc = hnc_tput_hook(0);
1343
1344 /* uninstall hotplug notifier */
1345 rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1346
1347 /* clear the topology table */
1348 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1349
1350 NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1351
1352 return rc;
1353}
1354
1355/**
1356 * hncm_migrate_to() - migrates a NAPI to a CPU
1357 * @napid: pointer to NAPI block
1358 * @ce_id: CE_id of the NAPI instance
1359 * @didx : index in the CPU topology table for the CPU to migrate to
1360 *
1361 * Migrates NAPI (identified by the CE_id) to the destination core
1362 * Updates the napi_map of the destination entry
1363 *
1364 * Return:
1365 * =0 : success
1366 * <0 : error
1367 */
1368static int hncm_migrate_to(struct qca_napi_data *napid,
1369 int napi_ce,
1370 int didx)
1371{
1372 int rc = 0;
1373 cpumask_t cpumask;
1374
1375 NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1376
1377 cpumask.bits[0] = (1 << didx);
1378 if (!napid->napis[napi_ce])
1379 return -EINVAL;
1380
1381 irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
1382 rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, &cpumask);
1383
1384 /* unmark the napis bitmap in the cpu table */
1385 napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
1386 /* mark the napis bitmap for the new designated cpu */
1387 napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1388 napid->napis[napi_ce]->cpu = didx;
1389
1390 NAPI_DEBUG("<--%s[%d]", __func__, rc);
1391 return rc;
1392}
1393/**
1394 * hncm_dest_cpu() - finds a destination CPU for NAPI
1395 * @napid: pointer to NAPI block
1396 * @act : RELOCATE | COLLAPSE | DISPERSE
1397 *
 * Finds the designated destination for the next IRQ.
1399 * RELOCATE: translated to either COLLAPSE or DISPERSE based
1400 * on napid->napi_mode (throughput state)
1401 * COLLAPSE: All have the same destination: the first online CPU in lilcl
1402 * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1403 * NAPIs on it
1404 *
1405 * Return: >=0 : index in the cpu topology table
1406 * : < 0 : error
1407 */
1408static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1409{
1410 int destidx = -1;
1411 int head, i;
1412
1413 NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1414 if (act == HNC_ACT_RELOCATE) {
1415 if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1416 act = HNC_ACT_COLLAPSE;
1417 else
1418 act = HNC_ACT_DISPERSE;
1419 NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1420 __func__, act);
1421 }
1422 if (act == HNC_ACT_COLLAPSE) {
1423 head = i = napid->lilcl_head;
1424retry_collapse:
1425 while (i >= 0) {
1426 if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1427 destidx = i;
1428 break;
1429 }
1430 i = napid->napi_cpu[i].cluster_nxt;
1431 }
1432 if ((destidx < 0) && (head == napid->lilcl_head)) {
1433 NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1434 __func__);
1435 head = i = napid->bigcl_head;
1436 goto retry_collapse;
1437 }
1438 } else { /* HNC_ACT_DISPERSE */
		int smallest = 99; /* > any possible hweight32() value (max 32) */
1440 int smallidx = -1;
1441
1442 head = i = napid->bigcl_head;
1443retry_disperse:
1444 while (i >= 0) {
1445 if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1446 (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
				smallest = hweight32(napid->napi_cpu[i].napis);
1448 smallidx = i;
1449 }
1450 i = napid->napi_cpu[i].cluster_nxt;
1451 }
1452 destidx = smallidx;
1453 if ((destidx < 0) && (head == napid->bigcl_head)) {
1454 NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1455 __func__);
1456 head = i = napid->lilcl_head;
1457 goto retry_disperse;
1458 }
1459 }
1460 NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1461 return destidx;
1462}
1463/**
1464 * hif_napi_cpu_migrate() - migrate IRQs away
 * @napid : pointer to NAPI block
 * @cpu   : -1: all CPUs, <n>: specific CPU
 * @action: COLLAPSE | DISPERSE
1467 *
1468 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1469 * cores. Eligible cores are:
1470 * act=COLLAPSE -> the first online core of the little cluster
1471 * act=DISPERSE -> separate cores of the big cluster, so that each core will
1472 * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis)
1473 *
1474 * Note that this function is called with a spinlock acquired already.
1475 *
1476 * Return: =0: success
1477 * <0: error
1478 */
1479
1480int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1481{
1482 int rc = 0;
1483 struct qca_napi_cpu *cpup;
1484 int i, dind;
1485 uint32_t napis;
1486
1487 NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1488 __func__, cpu, action);
1489 /* the following is really: hif_napi_enabled() with less overhead */
1490 if (napid->ce_map == 0) {
1491 NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1492 goto hncm_return;
1493 }
1494
1495 cpup = napid->napi_cpu;
1496
1497 switch (action) {
1498 case HNC_ACT_RELOCATE:
1499 case HNC_ACT_DISPERSE:
1500 case HNC_ACT_COLLAPSE: {
1501 /* first find the src napi set */
1502 if (cpu == HNC_ANY_CPU)
1503 napis = napid->ce_map;
1504 else
1505 napis = cpup[cpu].napis;
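		/*
		 * napis holds the bitmap of CE ids whose NAPI/IRQ is to be
		 * re-homed; the per-CPU napi bitmaps are cleared below and
		 * re-populated by hncm_migrate_to() as each one is moved.
		 */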
1506 /* then clear the napi bitmap on each CPU */
1507 for (i = 0; i < NR_CPUS; i++)
1508 cpup[i].napis = 0;
1509 /* then for each of the NAPIs to disperse: */
1510 for (i = 0; i < CE_COUNT_MAX; i++)
1511 if (napis & (1 << i)) {
1512 /* find a destination CPU */
1513 dind = hncm_dest_cpu(napid, action);
1514 if (dind >= 0) {
1515 NAPI_DEBUG("Migrating NAPI ce%d to %d",
1516 i, dind);
1517 rc = hncm_migrate_to(napid, i, dind);
1518 } else {
1519 NAPI_DEBUG("No dest for NAPI ce%d", i);
1520 hnc_dump_cpus(napid);
1521 rc = -1;
1522 }
1523 }
1524 break;
1525 }
1526 default: {
1527 NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1528 QDF_BUG(0);
1529 break;
1530 }
1531 } /* switch action */
1532
1533hncm_return:
1534 hnc_dump_cpus(napid);
1535 return rc;
1536}
1537
1538
1539/**
1540 * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1541 * @napid: pointer to qca_napi_data structure
1542 * @bl_flag: blacklist flag to enable/disable blacklisting
1543 *
1544 * The function enables/disables blacklisting for all the copy engine
1545 * interrupts on which NAPI is enabled.
1546 *
1547 * Return: None
1548 */
1549static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1550{
1551 int i;
1552 struct qca_napi_info *napii;
1553
1554 for (i = 0; i < CE_COUNT_MAX; i++) {
1555 /* check if NAPI is enabled on the CE */
1556 if (!(napid->ce_map & (0x01 << i)))
1557 continue;
1558
		/* double check that NAPI is allocated for the CE */
1560 napii = napid->napis[i];
1561 if (!(napii))
1562 continue;
1563
1564 if (bl_flag == true)
1565 irq_modify_status(napii->irq,
1566 0, IRQ_NO_BALANCING);
1567 else
1568 irq_modify_status(napii->irq,
1569 IRQ_NO_BALANCING, 0);
1570 HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
1571 }
1572}
1573
1574#ifdef CONFIG_SCHED_CORE_CTL
1575/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
1576static inline int hif_napi_core_ctl_set_boost(bool boost)
1577{
1578 return core_ctl_set_boost(boost);
1579}
1580#else
1581static inline int hif_napi_core_ctl_set_boost(bool boost)
1582{
1583 return 0;
1584}
1585#endif
1586/**
1587 * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1588 * @napid: pointer to qca_napi_data structure
1589 * @op: blacklist operation to perform
1590 *
1591 * The function enables/disables/queries blacklisting for all CE RX
1592 * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1593 * core_ctl_set_boost.
1594 * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1595 * balancer.
1596 *
1597 * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled
1598 * for BLACKLIST_QUERY op - blacklist refcount
1599 * for BLACKLIST_ON op - return value from core_ctl_set_boost API
1600 * for BLACKLIST_OFF op - return value from core_ctl_set_boost API
1601 */
1602int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1603 enum qca_blacklist_op op)
1604{
1605 int rc = 0;
1606 static int ref_count; /* = 0 by the compiler */
1607 uint8_t flags = napid->flags;
1608 bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1609 bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1610
1611 NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1612
1613 if (!(bl_en && ccb_en)) {
1614 rc = -EINVAL;
1615 goto out;
1616 }
1617
1618 switch (op) {
1619 case BLACKLIST_QUERY:
1620 rc = ref_count;
1621 break;
1622 case BLACKLIST_ON:
1623 ref_count++;
1624 rc = 0;
1625 if (ref_count == 1) {
1626 rc = hif_napi_core_ctl_set_boost(true);
1627 NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1628 rc, ref_count);
1629 hif_napi_bl_irq(napid, true);
1630 }
1631 break;
1632 case BLACKLIST_OFF:
1633 if (ref_count)
1634 ref_count--;
1635 rc = 0;
1636 if (ref_count == 0) {
1637 rc = hif_napi_core_ctl_set_boost(false);
1638 NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1639 rc, ref_count);
1640 hif_napi_bl_irq(napid, false);
1641 }
1642 break;
1643 default:
1644 NAPI_DEBUG("Invalid blacklist op: %d", op);
1645 rc = -EINVAL;
1646 } /* switch */
1647out:
1648 NAPI_DEBUG("<--%s[%d]", __func__, rc);
1649 return rc;
1650}
1651
1652/**
1653 * hif_napi_serialize() - [de-]serialize NAPI operations
1654 * @hif: context
1655 * @is_on: 1: serialize, 0: deserialize
1656 *
1657 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1658 * following steps (see hif_napi_event for code):
1659 * - put irqs of all NAPI instances on the same CPU
1660 * - only for the first serialize call: blacklist
1661 *
1662 * hif_napi_serialize(hif, 0):
1663 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1664 * - at the end of the timer, check the current throughput state and
1665 * implement it.
1666 */
1667static unsigned long napi_serialize_reqs;
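/*
 * napi_serialize_reqs counts serialize requests since the last de-serialize;
 * its value before the post-increment is passed as the USR_SERIAL event data,
 * so only the very first request (users == 0) arms blacklisting in
 * hif_napi_event().
 */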
1668int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1669{
1670 int rc = -EINVAL;
1671
1672 if (hif != NULL)
1673 switch (is_on) {
1674 case 0: { /* de-serialize */
1675 rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1676 (void *) 0);
1677 napi_serialize_reqs = 0;
1678 break;
1679 } /* end de-serialize */
1680 case 1: { /* serialize */
1681 rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1682 (void *)napi_serialize_reqs++);
1683 break;
1684 } /* end serialize */
1685 default:
1686 break; /* no-op */
1687 } /* switch */
1688 return rc;
1689}
1690
1691#endif /* ifdef HELIUMPLUS */