/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work abstraction used to reschedule a CE tasklet
 * @id: id of the copy engine this work services
 * @data: opaque pointer to the HIF context (struct hif_softc)
 * @work: work queued by the reschedule work handler
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

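/*
 * One reschedule worker per copy engine. work_initialized is set while these
 * workers are valid to schedule and is cleared again (see ce_tasklet_kill()
 * and deinit_tasklet_workers()) before the tasklets are destroyed.
 */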
static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet work entry
 * @work: struct work_struct to initialize
 * @work_handler: handler the work runs
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE reschedule workers
 * @scn: HIF Context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - cancel the per-CE reschedule workers
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * ce_tasklet() - bottom half that services a copy engine
 * @data: opaque pointer to the struct ce_tasklet_entry being serviced
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
			  __func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

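	/* drain the send/receive completions pending on this copy engine */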
	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, schedule tasklet to process them.
		 * Enable the interrupt only when there are no pending frames
		 * in any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_EXIT, NULL, NULL, 0, 0);

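	/* pairs with the qdf_atomic_inc() in ce_dispatch_interrupt() */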
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the masked copy engines
 * @hif_ce_state: hif_ce_state
 * @mask: bitmap of CE ids; a 1 at bit x initializes the tasklet for CE x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - destroy the ce tasklets and cancel pending work
 * @scn: hif context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill to
			 * avoid a race between tasklet_schedule and
			 * tasklet_kill. cancel_work_sync() does not return
			 * before reschedule_ce_tasklet_work_handler()
			 * completes, and any tasklet_schedule() that still
			 * slips in is handled by tasklet_kill() below.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT 20
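/* with the 10 ms sleep in hif_drain_tasklets(), this bounds the drain wait
 * to roughly 20 * 10 ms = 200 ms
 */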
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *	   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - tests if an irq on the given copy engine
 *	should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half irq handler for snoc targets
 * @irq: irq number raised by the kernel
 * @context: the ce_tasklet_entry registered with this irq
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

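	/* pld_get_ce_id() maps the kernel irq number back to its CE id */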
	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
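		/* one "[cpu]:count" token per CPU, stopping on truncation */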
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

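	/*
	 * A tasklet that is already scheduled will only run once, so drop
	 * the reference taken in ce_dispatch_interrupt() to keep
	 * active_tasklet_cnt balanced.
	 */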
	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	return true;
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: irqreturn_t
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d >= CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

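	/* mask this CE's interrupt until the bottom half re-enables it */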
	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/*
 * ce_name - irq name strings, indexed by copy engine id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
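
/* ce_register_irq() indexes this table by CE id, so it must provide a name
 * for every copy engine the target can expose
 */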

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_ce_free_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

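	/*
	 * done_mask records the CEs whose irq registration succeeded so that
	 * a failure part-way through unregisters exactly those.
	 */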
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}