Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 1 | /* |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 2 | * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 28 | #include <linux/pci.h> |
| 29 | #include <linux/slab.h> |
| 30 | #include <linux/interrupt.h> |
| 31 | #include <linux/if_arp.h> |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 32 | #include "qdf_lock.h" |
| 33 | #include "qdf_types.h" |
| 34 | #include "qdf_status.h" |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 35 | #include "regtable.h" |
| 36 | #include "hif.h" |
| 37 | #include "hif_io32.h" |
| 38 | #include "ce_main.h" |
| 39 | #include "ce_api.h" |
| 40 | #include "ce_reg.h" |
| 41 | #include "ce_internal.h" |
| 42 | #ifdef CONFIG_CNSS |
| 43 | #include <net/cnss.h> |
Houston Hoffman | bc69349 | 2016-03-14 21:11:41 -0700 | [diff] [blame] | 44 | #include "platform_icnss.h" |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 45 | #endif |
| 46 | #include "hif_debug.h" |
| 47 | #include "hif_napi.h" |
| 48 | |
| 49 | |
| 50 | /** |
| 51 | * ce_irq_status() - read CE IRQ status |
Komal Seelam | 644263d | 2016-02-22 20:45:49 +0530 | [diff] [blame] | 52 | * @scn: struct hif_softc |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 53 | * @ce_id: ce_id |
| 54 | * @host_status: host_status |
| 55 | * |
| 56 | * Return: IRQ status |
| 57 | */ |
Komal Seelam | 644263d | 2016-02-22 20:45:49 +0530 | [diff] [blame] | 58 | static inline void ce_irq_status(struct hif_softc *scn, |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 59 | int ce_id, uint32_t *host_status) |
| 60 | { |
| 61 | uint32_t offset = HOST_IS_ADDRESS + CE_BASE_ADDRESS(ce_id); |
| 62 | |
| 63 | *host_status = hif_read32_mb(scn->mem + offset); |
| 64 | } |
| 65 | |
/**
 * struct tasklet_work - deferred-work wrapper used to reschedule a CE tasklet
 *
 * @id: copy engine id this work item services
 * @data: opaque HIF context (a struct hif_softc *) stored at init time
 * @work: work_struct handed to the system workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
| 77 | |
| 78 | |
| 79 | /** |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 80 | * reschedule_ce_tasklet_work_handler() - reschedule work |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 81 | * @work: struct work_struct |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 82 | * |
| 83 | * Return: N/A |
| 84 | */ |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 85 | static void reschedule_ce_tasklet_work_handler(struct work_struct *work) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 86 | { |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 87 | struct tasklet_work *ce_work = container_of(work, struct tasklet_work, |
| 88 | work); |
Komal Seelam | 644263d | 2016-02-22 20:45:49 +0530 | [diff] [blame] | 89 | struct hif_softc *scn = ce_work->data; |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 90 | struct HIF_CE_state *hif_ce_state; |
| 91 | |
| 92 | if (NULL == scn) { |
| 93 | HIF_ERROR("%s: tasklet scn is null", __func__); |
| 94 | return; |
| 95 | } |
Komal Seelam | 02cf2f8 | 2016-02-22 20:44:25 +0530 | [diff] [blame] | 96 | |
| 97 | hif_ce_state = HIF_GET_CE_STATE(scn); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 98 | |
| 99 | if (scn->hif_init_done == false) { |
| 100 | HIF_ERROR("%s: wlan driver is unloaded", __func__); |
| 101 | return; |
| 102 | } |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 103 | tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 104 | return; |
| 105 | } |
| 106 | |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 107 | static struct tasklet_work tasklet_workers[CE_ID_MAX]; |
| 108 | static bool work_initialized; |
| 109 | |
/**
 * init_tasklet_work() - initialize a work_struct with the given handler
 * @work: struct work_struct to initialize
 * @work_handler: callback invoked when the work item runs
 *
 * On CONFIG_CNSS builds the platform wrapper cnss_init_work() is used;
 * otherwise this is a plain INIT_WORK().
 *
 * Return: N/A
 */
#ifdef CONFIG_CNSS
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	cnss_init_work(work, work_handler);
}
#else
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
#endif
| 130 | |
| 131 | /** |
| 132 | * init_tasklet_workers() - init_tasklet_workers |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 133 | * @scn: HIF Context |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 134 | * |
| 135 | * Return: N/A |
| 136 | */ |
Komal Seelam | 5584a7c | 2016-02-24 19:22:48 +0530 | [diff] [blame] | 137 | void init_tasklet_workers(struct hif_opaque_softc *scn) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 138 | { |
| 139 | uint32_t id; |
| 140 | |
| 141 | for (id = 0; id < CE_ID_MAX; id++) { |
| 142 | tasklet_workers[id].id = id; |
Komal Seelam | f860068 | 2016-02-02 18:17:13 +0530 | [diff] [blame] | 143 | tasklet_workers[id].data = scn; |
| 144 | init_tasklet_work(&tasklet_workers[id].work, |
| 145 | reschedule_ce_tasklet_work_handler); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 146 | } |
| 147 | work_initialized = true; |
| 148 | } |
| 149 | |
| 150 | #ifdef CONFIG_SLUB_DEBUG_ON |
| 151 | /** |
| 152 | * ce_schedule_tasklet() - schedule ce tasklet |
| 153 | * @tasklet_entry: struct ce_tasklet_entry |
| 154 | * |
| 155 | * Return: N/A |
| 156 | */ |
| 157 | static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) |
| 158 | { |
Orhan K AKYILDIZ | 0615891 | 2015-11-11 18:01:15 -0800 | [diff] [blame] | 159 | if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX)) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 160 | schedule_work(&tasklet_workers[tasklet_entry->ce_id].work); |
| 161 | else |
| 162 | HIF_ERROR("%s: work_initialized = %d, ce_id = %d", |
| 163 | __func__, work_initialized, tasklet_entry->ce_id); |
| 164 | } |
| 165 | #else |
| 166 | /** |
| 167 | * ce_schedule_tasklet() - schedule ce tasklet |
| 168 | * @tasklet_entry: struct ce_tasklet_entry |
| 169 | * |
| 170 | * Return: N/A |
| 171 | */ |
| 172 | static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) |
| 173 | { |
| 174 | tasklet_schedule(&tasklet_entry->intr_tq); |
| 175 | } |
| 176 | #endif |
| 177 | |
/**
 * ce_tasklet() - bottom-half handler servicing one copy engine
 * @data: tasklet cookie; holds the struct ce_tasklet_entry for this CE
 *
 * Drains the copy engine, invokes the LRO flush callback when one is
 * registered, and then either reschedules itself (if RX frames are still
 * pending) or re-enables the CE interrupt. active_tasklet_cnt is only
 * decremented on the no-reschedule path, pairing with the increment done
 * in ce_irq_handler().
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0);

	/* a tasklet firing while the link is suspended indicates a
	 * suspend/resume ordering bug; trap it loudly
	 */
	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (CE_state->lro_flush_cb != NULL) {
		CE_state->lro_flush_cb(CE_state->lro_data);
	}

	if (ce_check_rx_pending(scn, tasklet_entry->ce_id)) {
		/*
		 * There are frames pending, schedule tasklet to process them.
		 * Enable the interrupt only when there is no pending frames in
		 * any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0);
		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	/* interrupt stays masked while the target is held in reset */
	if (scn->target_status != OL_TRGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				NULL, NULL, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
Komal Seelam | bd7c51d | 2016-02-24 10:27:30 +0530 | [diff] [blame] | 227 | |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 228 | /** |
| 229 | * ce_tasklet_init() - ce_tasklet_init |
| 230 | * @hif_ce_state: hif_ce_state |
| 231 | * @mask: mask |
| 232 | * |
| 233 | * Return: N/A |
| 234 | */ |
| 235 | void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) |
| 236 | { |
| 237 | int i; |
| 238 | |
| 239 | for (i = 0; i < CE_COUNT_MAX; i++) { |
| 240 | if (mask & (1 << i)) { |
| 241 | hif_ce_state->tasklets[i].ce_id = i; |
| 242 | hif_ce_state->tasklets[i].inited = true; |
| 243 | hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; |
| 244 | tasklet_init(&hif_ce_state->tasklets[i].intr_tq, |
| 245 | ce_tasklet, |
| 246 | (unsigned long)&hif_ce_state->tasklets[i]); |
| 247 | } |
| 248 | } |
| 249 | } |
| 250 | /** |
| 251 | * ce_tasklet_kill() - ce_tasklet_kill |
| 252 | * @hif_ce_state: hif_ce_state |
| 253 | * |
| 254 | * Return: N/A |
| 255 | */ |
Komal Seelam | 644263d | 2016-02-22 20:45:49 +0530 | [diff] [blame] | 256 | void ce_tasklet_kill(struct hif_softc *scn) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 257 | { |
| 258 | int i; |
Komal Seelam | 644263d | 2016-02-22 20:45:49 +0530 | [diff] [blame] | 259 | struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 260 | |
| 261 | for (i = 0; i < CE_COUNT_MAX; i++) |
| 262 | if (hif_ce_state->tasklets[i].inited) { |
| 263 | tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); |
| 264 | hif_ce_state->tasklets[i].inited = false; |
| 265 | } |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 266 | qdf_atomic_set(&scn->active_tasklet_cnt, 0); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 267 | } |
/**
 * ce_irq_handler() - top-half handler for a copy engine interrupt
 * @irq: irq number that fired
 * @context: registered cookie; points at this CE's struct ce_tasklet_entry
 *
 * Validates that the irq maps to the expected copy engine, masks the CE
 * interrupt, reads the host status register, bumps active_tasklet_cnt
 * (paired with the decrement in ce_tasklet()), then defers processing to
 * either NAPI or the CE tasklet.
 *
 * Return: IRQ_HANDLED when the interrupt is claimed, IRQ_NONE otherwise
 */
static irqreturn_t ce_irq_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t host_status;
	int ce_id = icnss_get_ce_id(irq);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}
#ifndef HIF_PCI
	disable_irq_nosync(irq);
#endif
	hif_irq_disable(scn, ce_id);
	/* NOTE(review): host_status is read but not consumed here —
	 * presumably the register read itself is required; confirm
	 */
	ce_irq_status(scn, ce_id, &host_status);
	qdf_atomic_inc(&scn->active_tasklet_cnt);
	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}
| 308 | |
/**
 * ce_name - interrupt registration names, one per copy engine
 *
 * Indexed by CE id and passed to icnss_ce_request_irq() so each copy
 * engine's irq carries a distinct label.
 */
const char *ce_name[ICNSS_MAX_IRQ_REGISTRATIONS] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
| 328 | /** |
| 329 | * ce_unregister_irq() - ce_unregister_irq |
| 330 | * @hif_ce_state: hif_ce_state copy engine device handle |
| 331 | * @mask: which coppy engines to unregister for. |
| 332 | * |
| 333 | * Unregisters copy engine irqs matching mask. If a 1 is set at bit x, |
| 334 | * unregister for copy engine x. |
| 335 | * |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 336 | * Return: QDF_STATUS |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 337 | */ |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 338 | QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 339 | { |
| 340 | int id; |
| 341 | int ret; |
| 342 | |
| 343 | if (hif_ce_state == NULL) { |
| 344 | HIF_WARN("%s: hif_ce_state = NULL", __func__); |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 345 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 346 | } |
| 347 | for (id = 0; id < CE_COUNT_MAX; id++) { |
| 348 | if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { |
| 349 | ret = icnss_ce_free_irq(id, |
| 350 | &hif_ce_state->tasklets[id]); |
| 351 | if (ret < 0) |
| 352 | HIF_ERROR( |
| 353 | "%s: icnss_unregister_irq error - ce_id = %d, ret = %d", |
| 354 | __func__, id, ret); |
| 355 | } |
| 356 | } |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 357 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 358 | } |
| 359 | /** |
| 360 | * ce_register_irq() - ce_register_irq |
| 361 | * @hif_ce_state: hif_ce_state |
| 362 | * @mask: which coppy engines to unregister for. |
| 363 | * |
| 364 | * Registers copy engine irqs matching mask. If a 1 is set at bit x, |
| 365 | * Register for copy engine x. |
| 366 | * |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 367 | * Return: QDF_STATUS |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 368 | */ |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 369 | QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 370 | { |
| 371 | int id; |
| 372 | int ret; |
| 373 | unsigned long irqflags = IRQF_TRIGGER_RISING; |
| 374 | uint32_t done_mask = 0; |
| 375 | |
| 376 | for (id = 0; id < CE_COUNT_MAX; id++) { |
| 377 | if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { |
| 378 | ret = icnss_ce_request_irq(id, ce_irq_handler, |
| 379 | irqflags, ce_name[id], |
| 380 | &hif_ce_state->tasklets[id]); |
| 381 | if (ret) { |
| 382 | HIF_ERROR( |
| 383 | "%s: cannot register CE %d irq handler, ret = %d", |
| 384 | __func__, id, ret); |
| 385 | ce_unregister_irq(hif_ce_state, done_mask); |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 386 | return QDF_STATUS_E_FAULT; |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 387 | } else { |
| 388 | done_mask |= 1 << id; |
| 389 | } |
| 390 | } |
| 391 | } |
| 392 | |
| 393 | #ifndef HIF_PCI |
| 394 | /* move to hif_configure_irq */ |
Komal Seelam | 02cf2f8 | 2016-02-22 20:44:25 +0530 | [diff] [blame] | 395 | ce_enable_irq_in_group_reg(HIF_GET_SOFTC(hif_ce_state), done_mask); |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 396 | #endif |
| 397 | |
Chouhan, Anurag | fc06aa9 | 2016-03-03 19:05:05 +0530 | [diff] [blame] | 398 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | d5c9f1c | 2015-11-08 19:04:44 -0800 | [diff] [blame] | 399 | } |