Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. |
| 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
/**
 * DOC: hif_irq_affinity.c
 *
 * This IRQ affinity implementation is OS dependent, so it can be treated as
 * an abstraction layer... Should this be moved into a /linux folder?
 */
| 25 | |
Dustin Brown | 49a8f6e | 2017-08-17 15:47:48 -0700 | [diff] [blame] | 26 | #include <linux/string.h> /* memset */ |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 27 | |
| 28 | /* Linux headers */ |
| 29 | #include <linux/cpumask.h> |
| 30 | #include <linux/cpufreq.h> |
| 31 | #include <linux/cpu.h> |
| 32 | #include <linux/topology.h> |
| 33 | #include <linux/interrupt.h> |
| 34 | #include <linux/irq.h> |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 35 | #ifdef CONFIG_SCHED_CORE_CTL |
| 36 | #include <linux/sched/core_ctl.h> |
| 37 | #endif |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 38 | #include <linux/pm.h> |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 39 | #include <hif_napi.h> |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 40 | #include <hif_irq_affinity.h> |
| 41 | #include <hif_exec.h> |
| 42 | #include <hif_main.h> |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 43 | |
#if defined(FEATURE_NAPI_DEBUG) && defined(HIF_IRQ_AFFINITY)
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */

/**
 * hnc_dump_cpus() - dump the NAPI/CPU bookkeeping for debugging
 * @napid: pointer to the NAPI control block
 *
 * Debug-only helper: delegates to hif_napi_stats() to print the per-CPU
 * NAPI distribution. Compiled to a no-op unless both FEATURE_NAPI_DEBUG
 * and HIF_IRQ_AFFINITY are enabled.
 *
 * Return: None
 */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	hif_napi_stats(napid);
}
#else
/* no-op stub when NAPI debug or IRQ affinity support is compiled out */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
}
#endif /* FEATURE_NAPI_DEBUG && HIF_IRQ_AFFINITY */
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 56 | |
#ifdef HIF_IRQ_AFFINITY
/**
 * hif_exec_event() - reacts to events that impact irq affinity
 * @hif_ctx: pointer to hif context
 * @event: event that has been detected
 * @data: more data regarding the event
 *
 * Description:
 * This function handles two types of events:
 * 1- Events that change the state of NAPI (enabled/disabled):
 *    {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *    The state is retrievable by "hdd_napi_enabled(-1)"
 *  - NAPI will be on if either INI file is on and it has not been disabled
 *    by a subsequent vendor CMD, or it has been enabled by a vendor CMD.
 * 2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *    {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *  - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *  - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *  - In LO tput mode, NAPI will yield control of its interrupts to the
 *    system management functions. However in HI throughput mode, NAPI will
 *    actively manage its interrupts/instances (by trying to disperse them
 *    out to separate performance cores).
 *  - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 *  + In some cases (roaming peer management is the only case so far), a
 *    client can trigger a "SERIALIZE" event. Basically, this means that the
 *    user is asking NAPI to go into a truly single execution context state.
 *    So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
 *    (if called for the first time) and then moves all IRQs (for NAPI
 *    instances) to be collapsed to a single core. If called multiple times,
 *    it will just re-collapse the CPUs. This is because blacklist-on() API
 *    is reference-counted, and because the API has already been called.
 *
 *    Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to
 *    go to its "normal" operation. Optionally, they can give a timeout
 *    value (in multiples of BusBandwidthCheckPeriod -- 100 msecs by
 *    default). In this case, NAPI will just set the current throughput
 *    state to uninitialized and set the delay period. Once policy handler
 *    is called, it would skip applying the policy delay period times, and
 *    otherwise apply the policy.
 *
 * Return:
 *  < 0: some error
 *  = 0: event handled successfully
 */
int hif_exec_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	/* NOTE(review): prev_state is captured under the lock but appears
	 * unused in this chunk -- possibly used by code outside this view,
	 * or a leftover; confirm before removing.
	 */
	uint32_t prev_state;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	/* blacklist transition decided under the lock, applied below */
	enum {
		BLACKLIST_NOT_PENDING,
		BLACKLIST_ON_PENDING,
		BLACKLIST_OFF_PENDING
	} blacklist_pending = BLACKLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE:
		/* deprecated */
		break;

	case NAPI_EVT_CPU_STATE: {
		/* @data encodes CPU id in the high 16 bits, state in low 8 */
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_exec_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			blacklist_pending = BLACKLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to power cluster once
			 * blacklisting has been turned off in the "else" case.
			 */
		} else {
			/* from TPUT_LO -> TPUT->HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_exec_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			blacklist_pending = BLACKLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		/* collapse all datapath IRQs to a single core */
		rc = hif_exec_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		/* blacklist only for the first serializing user */
		if ((users == 0) && (rc == 0))
			blacklist_pending = BLACKLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark current mode to uninitialized to ensure
		 * it will be set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long) data);
		break;
	} /* default */
	}; /* switch */

	/* apply the blacklist transition decided above, still under lock */
	switch (blacklist_pending) {
	case BLACKLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
		break;
	case BLACKLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch blacklist_pending */

	qdf_spin_unlock_bh(&(napid->lock));

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
#endif
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 230 | |
/**
 * hif_exec_correct_cpu() - correct the interrupt affinity for an exec context
 * @exec_ctx: pointer to the hif_exec_context to check
 *
 * If CPU correction is enabled and the interrupt fired on a CPU other than
 * the one recorded in @exec_ctx->cpu (and a blacklist is currently active),
 * re-pin every IRQ of the context back to the designated CPU via
 * irq_set_affinity_hint(), temporarily clearing IRQ_NO_BALANCING so the
 * hint is honored, then restoring it.
 *
 * Return: true => interrupt already on correct cpu, no correction needed
 *         false => interrupt on wrong cpu, correction done for cpu affinity
 *                  of the interrupt
 */
static inline
bool hif_exec_correct_cpu(struct hif_exec_context *exec_ctx)
{
	bool right_cpu = true;
	int rc = 0;
	cpumask_t cpumask;
	int cpu;
	struct hif_softc *hif_softc = HIF_GET_SOFTC(exec_ctx->hif);
	struct qca_napi_data *napid;
	int ind;

	napid = &hif_softc->napi_data;

	/* feature gate: do nothing unless CPU correction was requested */
	if (!(napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION))
		goto done;

	/* correct only when off-CPU AND a blacklist refcount is held */
	cpu = qdf_get_cpu();
	if (likely((cpu == exec_ctx->cpu) ||
		   hif_exec_cpu_blacklist(napid, BLACKLIST_QUERY) == 0))
		goto done;

	right_cpu = false;

	NAPI_DEBUG("interrupt on wrong CPU, correcting");
	/* single-CPU mask targeting the designated core
	 * NOTE(review): assumes exec_ctx->cpu < BITS_PER_LONG -- confirm
	 */
	cpumask.bits[0] = (0x01 << exec_ctx->cpu);

	for (ind = 0; ind < exec_ctx->numirq; ind++) {
		if (exec_ctx->os_irq[ind]) {
			/* lift NO_BALANCING so the affinity hint applies */
			irq_modify_status(exec_ctx->os_irq[ind],
					  IRQ_NO_BALANCING, 0);
			rc = irq_set_affinity_hint(exec_ctx->os_irq[ind],
						   &cpumask);
			/* re-arm NO_BALANCING to keep irqbalance away */
			irq_modify_status(exec_ctx->os_irq[ind], 0,
					  IRQ_NO_BALANCING);

			if (rc)
				HIF_ERROR("error setting irq affinity hint: %d",
					  rc);
			else
				exec_ctx->stats[cpu].cpu_corrected++;
		}
	}
done:
	return right_cpu;
}
| 284 | |
| 285 | /** |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 286 | * hncm_migrate_to() - migrates a NAPI to a CPU |
| 287 | * @napid: pointer to NAPI block |
| 288 | * @ce_id: CE_id of the NAPI instance |
| 289 | * @didx : index in the CPU topology table for the CPU to migrate to |
| 290 | * |
| 291 | * Migrates NAPI (identified by the CE_id) to the destination core |
| 292 | * Updates the napi_map of the destination entry |
| 293 | * |
| 294 | * Return: |
| 295 | * =0 : success |
| 296 | * <0 : error |
| 297 | */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 298 | static int hncm_exec_migrate_to(struct qca_napi_data *napid, uint8_t ctx_id, |
| 299 | int didx) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 300 | { |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 301 | struct hif_exec_context *exec_ctx; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 302 | int rc = 0; |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 303 | int status = 0; |
| 304 | int ind; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 305 | cpumask_t cpumask; |
| 306 | |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 307 | |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 308 | NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); |
| 309 | |
| 310 | cpumask.bits[0] = (1 << didx); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 311 | exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, ctx_id); |
| 312 | if (exec_ctx == NULL) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 313 | return -EINVAL; |
| 314 | |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 315 | for (ind = 0; ind < exec_ctx->numirq; ind++) { |
| 316 | if (exec_ctx->os_irq[ind]) { |
| 317 | irq_modify_status(exec_ctx->os_irq[ind], |
| 318 | IRQ_NO_BALANCING, 0); |
| 319 | rc = irq_set_affinity_hint(exec_ctx->os_irq[ind], |
| 320 | &cpumask); |
| 321 | if (rc) |
| 322 | status = rc; |
| 323 | } |
| 324 | } |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 325 | |
| 326 | /* unmark the napis bitmap in the cpu table */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 327 | napid->napi_cpu[exec_ctx->cpu].napis &= ~(0x01 << ctx_id); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 328 | /* mark the napis bitmap for the new designated cpu */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 329 | napid->napi_cpu[didx].napis |= (0x01 << ctx_id); |
| 330 | exec_ctx->cpu = didx; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 331 | |
| 332 | NAPI_DEBUG("<--%s[%d]", __func__, rc); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 333 | return status; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 334 | } |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 335 | |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 336 | /** |
| 337 | * hncm_dest_cpu() - finds a destination CPU for NAPI |
| 338 | * @napid: pointer to NAPI block |
| 339 | * @act : RELOCATE | COLLAPSE | DISPERSE |
| 340 | * |
| 341 | * Finds the designated destionation for the next IRQ. |
| 342 | * RELOCATE: translated to either COLLAPSE or DISPERSE based |
| 343 | * on napid->napi_mode (throughput state) |
| 344 | * COLLAPSE: All have the same destination: the first online CPU in lilcl |
| 345 | * DISPERSE: One of the CPU in bigcl, which has the smallest number of |
| 346 | * NAPIs on it |
| 347 | * |
| 348 | * Return: >=0 : index in the cpu topology table |
| 349 | * : < 0 : error |
| 350 | */ |
| 351 | static int hncm_dest_cpu(struct qca_napi_data *napid, int act) |
| 352 | { |
| 353 | int destidx = -1; |
| 354 | int head, i; |
| 355 | |
| 356 | NAPI_DEBUG("-->%s(act=%d)", __func__, act); |
| 357 | if (act == HNC_ACT_RELOCATE) { |
| 358 | if (napid->napi_mode == QCA_NAPI_TPUT_LO) |
| 359 | act = HNC_ACT_COLLAPSE; |
| 360 | else |
| 361 | act = HNC_ACT_DISPERSE; |
| 362 | NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", |
| 363 | __func__, act); |
| 364 | } |
| 365 | if (act == HNC_ACT_COLLAPSE) { |
| 366 | head = i = napid->lilcl_head; |
| 367 | retry_collapse: |
| 368 | while (i >= 0) { |
| 369 | if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { |
| 370 | destidx = i; |
| 371 | break; |
| 372 | } |
| 373 | i = napid->napi_cpu[i].cluster_nxt; |
| 374 | } |
| 375 | if ((destidx < 0) && (head == napid->lilcl_head)) { |
| 376 | NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", |
| 377 | __func__); |
| 378 | head = i = napid->bigcl_head; |
| 379 | goto retry_collapse; |
| 380 | } |
| 381 | } else { /* HNC_ACT_DISPERSE */ |
| 382 | int smallest = 99; /* all 32 bits full */ |
| 383 | int smallidx = -1; |
| 384 | |
| 385 | head = i = napid->bigcl_head; |
| 386 | retry_disperse: |
| 387 | while (i >= 0) { |
| 388 | if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && |
| 389 | (hweight32(napid->napi_cpu[i].napis) <= smallest)) { |
| 390 | smallest = napid->napi_cpu[i].napis; |
| 391 | smallidx = i; |
| 392 | } |
| 393 | i = napid->napi_cpu[i].cluster_nxt; |
| 394 | } |
| 395 | destidx = smallidx; |
| 396 | if ((destidx < 0) && (head == napid->bigcl_head)) { |
| 397 | NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", |
| 398 | __func__); |
| 399 | head = i = napid->lilcl_head; |
| 400 | goto retry_disperse; |
| 401 | } |
| 402 | } |
| 403 | NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); |
| 404 | return destidx; |
| 405 | } |
| 406 | /** |
| 407 | * hif_napi_cpu_migrate() - migrate IRQs away |
| 408 | * @cpu: -1: all CPUs <n> specific CPU |
| 409 | * @act: COLLAPSE | DISPERSE |
| 410 | * |
| 411 | * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible |
| 412 | * cores. Eligible cores are: |
| 413 | * act=COLLAPSE -> the first online core of the little cluster |
| 414 | * act=DISPERSE -> separate cores of the big cluster, so that each core will |
| 415 | * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) |
| 416 | * |
| 417 | * Note that this function is called with a spinlock acquired already. |
| 418 | * |
| 419 | * Return: =0: success |
| 420 | * <0: error |
| 421 | */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 422 | int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 423 | { |
| 424 | int rc = 0; |
| 425 | struct qca_napi_cpu *cpup; |
| 426 | int i, dind; |
| 427 | uint32_t napis; |
| 428 | |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 429 | |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 430 | NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", |
| 431 | __func__, cpu, action); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 432 | |
| 433 | if (napid->exec_map == 0) { |
| 434 | NAPI_DEBUG("%s: datapath contexts to disperse", __func__); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 435 | goto hncm_return; |
| 436 | } |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 437 | cpup = napid->napi_cpu; |
| 438 | |
| 439 | switch (action) { |
| 440 | case HNC_ACT_RELOCATE: |
| 441 | case HNC_ACT_DISPERSE: |
| 442 | case HNC_ACT_COLLAPSE: { |
| 443 | /* first find the src napi set */ |
| 444 | if (cpu == HNC_ANY_CPU) |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 445 | napis = napid->exec_map; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 446 | else |
| 447 | napis = cpup[cpu].napis; |
| 448 | /* then clear the napi bitmap on each CPU */ |
| 449 | for (i = 0; i < NR_CPUS; i++) |
| 450 | cpup[i].napis = 0; |
| 451 | /* then for each of the NAPIs to disperse: */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 452 | for (i = 0; i < HIF_MAX_GROUP; i++) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 453 | if (napis & (1 << i)) { |
| 454 | /* find a destination CPU */ |
| 455 | dind = hncm_dest_cpu(napid, action); |
| 456 | if (dind >= 0) { |
| 457 | NAPI_DEBUG("Migrating NAPI ce%d to %d", |
| 458 | i, dind); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 459 | rc = hncm_exec_migrate_to(napid, i, |
| 460 | dind); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 461 | } else { |
| 462 | NAPI_DEBUG("No dest for NAPI ce%d", i); |
| 463 | hnc_dump_cpus(napid); |
| 464 | rc = -1; |
| 465 | } |
| 466 | } |
| 467 | break; |
| 468 | } |
| 469 | default: { |
| 470 | NAPI_DEBUG("%s: bad action: %d\n", __func__, action); |
| 471 | QDF_BUG(0); |
| 472 | break; |
| 473 | } |
| 474 | } /* switch action */ |
| 475 | |
| 476 | hncm_return: |
| 477 | hnc_dump_cpus(napid); |
| 478 | return rc; |
| 479 | } |
| 480 | |
| 481 | |
| 482 | /** |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 483 | * hif_exec_bl_irq() - calls irq_modify_status to enable/disable blacklisting |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 484 | * @napid: pointer to qca_napi_data structure |
| 485 | * @bl_flag: blacklist flag to enable/disable blacklisting |
| 486 | * |
| 487 | * The function enables/disables blacklisting for all the copy engine |
| 488 | * interrupts on which NAPI is enabled. |
| 489 | * |
| 490 | * Return: None |
| 491 | */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 492 | static inline void hif_exec_bl_irq(struct qca_napi_data *napid, bool bl_flag) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 493 | { |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 494 | int i, j; |
| 495 | struct hif_exec_context *exec_ctx; |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 496 | |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 497 | for (i = 0; i < HIF_MAX_GROUP; i++) { |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 498 | /* check if NAPI is enabled on the CE */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 499 | if (!(napid->exec_map & (0x01 << i))) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 500 | continue; |
| 501 | |
| 502 | /*double check that NAPI is allocated for the CE */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 503 | exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, i); |
| 504 | if (!(exec_ctx)) |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 505 | continue; |
| 506 | |
| 507 | if (bl_flag == true) |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 508 | for (j = 0; j < exec_ctx->numirq; j++) |
| 509 | irq_modify_status(exec_ctx->os_irq[j], |
| 510 | 0, IRQ_NO_BALANCING); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 511 | else |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 512 | for (j = 0; j < exec_ctx->numirq; j++) |
| 513 | irq_modify_status(exec_ctx->os_irq[j], |
| 514 | IRQ_NO_BALANCING, 0); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 515 | HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); |
| 516 | } |
| 517 | } |
| 518 | |
/**
 * hif_napi_core_ctl_set_boost() - request/release the scheduler core boost
 * @boost: true to request the boost, false to release it
 *
 * Thin wrapper around the kernel's core_ctl_set_boost(); compiles to a
 * stub returning 0 when CONFIG_SCHED_CORE_CTL is not available.
 *
 * Return: result of core_ctl_set_boost(), or 0 when unsupported
 */
#ifdef CONFIG_SCHED_CORE_CTL
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	int ret = core_ctl_set_boost(boost);

	return ret;
}
#else
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 531 | |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 532 | /** |
| 533 | * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. |
| 534 | * @napid: pointer to qca_napi_data structure |
| 535 | * @op: blacklist operation to perform |
| 536 | * |
| 537 | * The function enables/disables/queries blacklisting for all CE RX |
| 538 | * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables |
| 539 | * core_ctl_set_boost. |
| 540 | * Once blacklisting is enabled, the interrupts will not be managed by the IRQ |
| 541 | * balancer. |
| 542 | * |
| 543 | * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled |
| 544 | * for BLACKLIST_QUERY op - blacklist refcount |
| 545 | * for BLACKLIST_ON op - return value from core_ctl_set_boost API |
| 546 | * for BLACKLIST_OFF op - return value from core_ctl_set_boost API |
| 547 | */ |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 548 | int hif_exec_cpu_blacklist(struct qca_napi_data *napid, |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 549 | enum qca_blacklist_op op) |
| 550 | { |
| 551 | int rc = 0; |
| 552 | static int ref_count; /* = 0 by the compiler */ |
| 553 | uint8_t flags = napid->flags; |
| 554 | bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; |
| 555 | bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; |
| 556 | |
| 557 | NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); |
| 558 | |
| 559 | if (!(bl_en && ccb_en)) { |
| 560 | rc = -EINVAL; |
| 561 | goto out; |
| 562 | } |
| 563 | |
| 564 | switch (op) { |
| 565 | case BLACKLIST_QUERY: |
| 566 | rc = ref_count; |
| 567 | break; |
| 568 | case BLACKLIST_ON: |
| 569 | ref_count++; |
| 570 | rc = 0; |
| 571 | if (ref_count == 1) { |
| 572 | rc = hif_napi_core_ctl_set_boost(true); |
| 573 | NAPI_DEBUG("boost_on() returns %d - refcnt=%d", |
| 574 | rc, ref_count); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 575 | hif_exec_bl_irq(napid, true); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 576 | } |
| 577 | break; |
| 578 | case BLACKLIST_OFF: |
| 579 | if (ref_count) |
| 580 | ref_count--; |
| 581 | rc = 0; |
| 582 | if (ref_count == 0) { |
| 583 | rc = hif_napi_core_ctl_set_boost(false); |
| 584 | NAPI_DEBUG("boost_off() returns %d - refcnt=%d", |
| 585 | rc, ref_count); |
Houston Hoffman | b3497c0 | 2017-04-22 18:27:00 -0700 | [diff] [blame] | 586 | hif_exec_bl_irq(napid, false); |
Houston Hoffman | a0ecf33 | 2017-04-22 17:41:58 -0700 | [diff] [blame] | 587 | } |
| 588 | break; |
| 589 | default: |
| 590 | NAPI_DEBUG("Invalid blacklist op: %d", op); |
| 591 | rc = -EINVAL; |
| 592 | } /* switch */ |
| 593 | out: |
| 594 | NAPI_DEBUG("<--%s[%d]", __func__, rc); |
| 595 | return rc; |
| 596 | } |
| 597 | |