/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - deferred work item used to re-arm a CE tasklet
 * @id: copy engine id this work item belongs to
 * @data: HIF context (stored as an opaque pointer to struct hif_softc)
 * @work: work queue item
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
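
/*
 * Nothing in this file queues tasklet_workers[].work; it is queued from
 * elsewhere in HIF when a CE tasklet needs to be re-armed from process
 * context (this file only initializes and cancels it). Queueing it runs
 * reschedule_ce_tasklet_work_handler(), which re-schedules the matching
 * CE tasklet once the driver is fully initialized and the tasklet is
 * still alive.
 */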

/**
 * reschedule_ce_tasklet_work_handler() - work handler that re-schedules
 *	a CE tasklet
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet work item
 * @work: struct work_struct
 * @work_handler: work handler callback
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE reschedule work items
 * @scn: HIF context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - cancel the per-CE reschedule work items
 * @scn: HIF context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * ce_tasklet() - bottom half that services a single copy engine
 * @data: the struct ce_tasklet_entry being serviced, cast to unsigned long
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
			  __func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending: schedule the tasklet again to
		 * process them. The interrupt is re-enabled only when no
		 * frames are pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

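	/*
	 * No more pending frames: re-arm the CE interrupt that
	 * ce_dispatch_interrupt() disabled before this tasklet was
	 * scheduled, unless the target is in reset.
	 */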
	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_EXIT, NULL, NULL, 0, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the copy engines in @mask
 * @hif_ce_state: hif_ce_state
 * @mask: bitmap of copy engine ids; bit x set initializes the tasklet for CE x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - disable and kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: non-atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the reschedule work before tasklet_kill
			 * to avoid a race between tasklet_schedule and
			 * tasklet_kill; cancel_work_sync() does not return
			 * until reschedule_ce_tasklet_work_handler()
			 * completes, and if tasklet_schedule() still happens
			 * afterwards, tasklet_kill() takes care of it.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT 20
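/*
 * With HIF_CE_DRAIN_WAIT_CNT retries and a 10 ms sleep between reads,
 * hif_drain_tasklets() below gives the bottom halves roughly 200 ms to
 * finish before giving up with -EFAULT.
 */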
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let the running tasklets clear the pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns,
 *	-EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
270
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700271#ifdef WLAN_SUSPEND_RESUME_TEST
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700272/**
Dustin Brownccf859d2017-06-01 14:31:01 -0700273 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should
274 * trigger a unit-test resume.
275 * @scn: The HIF context to operate on
276 * @ce_id: The copy engine Id from the originating interrupt
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700277 *
Dustin Brownccf859d2017-06-01 14:31:01 -0700278 * Return: true if the raised irq should trigger a unit-test resume
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700279 */
Dustin Brownccf859d2017-06-01 14:31:01 -0700280static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700281{
Dustin Brown6834d322017-03-20 15:02:48 -0700282 int errno;
283 uint8_t wake_ce_id;
Dustin Brown973abe82016-08-30 12:29:10 -0700284
Dustin Brownccf859d2017-06-01 14:31:01 -0700285 if (!hif_is_ut_suspended(scn))
Dustin Brown973abe82016-08-30 12:29:10 -0700286 return false;
287
Dustin Brown6834d322017-03-20 15:02:48 -0700288 /* ensure passed ce_id matches wake ce_id */
289 errno = hif_get_wake_ce_id(scn, &wake_ce_id);
290 if (errno) {
291 HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700292 return false;
293 }
Dustin Brown973abe82016-08-30 12:29:10 -0700294
Dustin Brown6834d322017-03-20 15:02:48 -0700295 return ce_id == wake_ce_id;
Dustin Brown6bdbda52016-09-27 15:52:30 -0700296}
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700297#else
Dustin Brown6bdbda52016-09-27 15:52:30 -0700298static inline bool
Dustin Brownccf859d2017-06-01 14:31:01 -0700299hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700300{
301 return false;
302}
Dustin Brownccf859d2017-06-01 14:31:01 -0700303#endif /* WLAN_SUSPEND_RESUME_TEST */
Rajeev Kumar5bac30f2016-07-14 17:31:29 -0700304
/**
 * hif_snoc_interrupt_handler() - top-half handler for snoc CE interrupts
 * @irq: irq number from the kernel
 * @context: the registered struct ce_tasklet_entry
 *
 * Return: IRQ_HANDLED or IRQ_NONE from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule the CE tasklet if not already pending
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if the tasklet was already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
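		/*
		 * The caller took an active_tasklet_cnt reference expecting
		 * a fresh schedule; drop it here, since the already-pending
		 * tasklet will run (and decrement) only once.
		 */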
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	return true;
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_NONE if the interrupt does not belong to @tasklet_entry,
 *	IRQ_HANDLED otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

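	/*
	 * Take an active_tasklet_cnt reference before handing off to the
	 * bottom half. ce_tasklet() drops it when servicing completes (the
	 * NAPI poll path in hif_napi.c is expected to do the same), and
	 * hif_drain_tasklets() polls this count while draining.
	 */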
	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/**
 * ce_name - human-readable names for the copy engine irqs
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_ce_free_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

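	/*
	 * Request one irq per initialized copy engine in @mask, tracking
	 * successes in done_mask so that a mid-loop failure unwinds exactly
	 * the irqs acquired so far.
	 */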
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}