/*
 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work wrapper used to reschedule a CE tasklet
 * @id: copy engine id
 * @data: HIF context (struct hif_softc *) the work acts on
 * @work: work queued on the system workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a reschedule work struct
 * @work: struct work_struct to initialize
 * @work_handler: handler the work runs
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the reschedule workers for all CEs
 * @scn: HIF Context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}
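
/*
 * Illustrative sketch (not itself driver code): on HIF_CONFIG_SLUB_DEBUG_ON
 * builds, the workers set up above let a busy CE tasklet re-arm itself from
 * process context instead of directly from softirq context:
 *
 *   ce_tasklet()                           softirq (tasklet) context
 *     -> ce_schedule_tasklet()
 *          -> schedule_work()              defer to the system workqueue
 *               -> reschedule_ce_tasklet_work_handler()
 *                    -> tasklet_schedule() re-arm the CE tasklet
 *
 * On non-debug builds ce_schedule_tasklet() calls tasklet_schedule()
 * directly; see the #ifdef HIF_CONFIG_SLUB_DEBUG_ON block below.
 */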

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * On SLUB debug builds, reschedule through the system workqueue so the
 * tasklet is re-armed from process context.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			  __func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif

/**
 * ce_tasklet() - bottom half that services a copy engine
 * @data: opaque pointer to the struct ce_tasklet_entry being serviced
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
			  __func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	qdf_spin_lock_bh(&CE_state->lro_unloading_lock);
	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (CE_state->lro_flush_cb != NULL)
		CE_state->lro_flush_cb(CE_state->lro_data);
	qdf_spin_unlock_bh(&CE_state->lro_unloading_lock);

	if (ce_check_rx_pending(CE_state)) {
		/*
		 * There are frames pending; reschedule the tasklet to process
		 * them. The interrupt is only re-enabled once no frames are
		 * pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, 0);
		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_EXIT, NULL, NULL, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the selected copy engines
 * @hif_ce_state: hif_ce_state
 * @mask: bitmask of copy engine ids to initialize tasklets for
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}
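
/*
 * Usage sketch (hypothetical mask value): each set bit in @mask selects one
 * copy engine, so initializing tasklets for CE 0 and CE 2 only would be:
 *
 *   ce_tasklet_init(hif_ce_state, (1 << 0) | (1 << 2));
 *
 * Engines whose bit is clear keep tasklets[i].inited == false and are
 * skipped by ce_tasklet_kill() and ce_register_irq().
 */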

/**
 * ce_tasklet_kill() - kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++)
		if (hif_ce_state->tasklets[i].inited) {
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
			hif_ce_state->tasklets[i].inited = false;
		}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT 20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *	   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
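
/*
 * Worst-case wait arithmetic for the drain loop above: HIF_CE_DRAIN_WAIT_CNT
 * (20) iterations of msleep(10) gives roughly 20 * 10 ms = 200 ms before
 * hif_drain_tasklets() gives up and returns -EFAULT. msleep() may sleep
 * longer than requested, so this is a lower bound on the timeout.
 */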

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_fake_apps_resume_work() - Work handler for fake apps resume callback
 * @work: The work struct being passed from the linux kernel
 *
 * Return: none
 */
void hif_fake_apps_resume_work(struct work_struct *work)
{
	struct fake_apps_context *ctx =
		container_of(work, struct fake_apps_context, resume_work);

	QDF_BUG(ctx->resume_callback);
	ctx->resume_callback(0);
	ctx->resume_callback = NULL;
}

/**
 * hif_fake_apps_suspend() - Set up unit-test related suspend state. Call
 *	after a normal WoW suspend has been completed.
 * @hif_ctx: The HIF context to operate on
 * @callback: The function to call when fake apps resume is triggered
 *
 * Set the fake suspend flag so hif knows it will need to fake the apps
 * resume process via hif_trigger_fake_apps_resume().
 *
 * Return: none
 */
void hif_fake_apps_suspend(struct hif_opaque_softc *hif_ctx,
			   hif_fake_resume_callback callback)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->fake_apps_ctx.resume_callback = callback;
	set_bit(HIF_FA_SUSPENDED_BIT, &scn->fake_apps_ctx.state);
}

/**
 * hif_fake_apps_resume() - Clean up unit-test related suspend state. Call
 *	before doing a normal WoW resume if suspend was initiated via fake
 *	apps suspend.
 * @hif_ctx: The HIF context to operate on
 *
 * Return: none
 */
void hif_fake_apps_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	clear_bit(HIF_FA_SUSPENDED_BIT, &scn->fake_apps_ctx.state);
	scn->fake_apps_ctx.resume_callback = NULL;
}

/**
 * hif_interrupt_is_fake_apps_resume() - Determine if the raised irq should
 *	trigger a fake apps resume.
 * @hif_ctx: The HIF context to operate on
 * @ce_id: The copy engine id of the originating interrupt
 *
 * Return: true if the raised irq should trigger a fake apps resume
 */
static bool hif_interrupt_is_fake_apps_resume(struct hif_opaque_softc *hif_ctx,
					      int ce_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;
	QDF_STATUS status;

	if (!test_bit(HIF_FA_SUSPENDED_BIT, &scn->fake_apps_ctx.state))
		return false;

	/* ensure the passed ce_id matches the wake irq;
	 * dl_pipe is populated with the wake irq number
	 */
	status = hif_map_service_to_pipe(hif_ctx, HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);

	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return false;
	}

	return ce_id == dl_pipe;
}

/**
 * hif_trigger_fake_apps_resume() - Trigger a fake apps resume by scheduling
 *	the previously registered callback for execution
 * @hif_ctx: The HIF context to operate on
 *
 * Return: None
 */
static void hif_trigger_fake_apps_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!test_and_clear_bit(HIF_FA_SUSPENDED_BIT,
				&scn->fake_apps_ctx.state))
		return;

	schedule_work(&scn->fake_apps_ctx.resume_work);
}

#else

static inline bool
hif_interrupt_is_fake_apps_resume(struct hif_opaque_softc *hif_ctx, int ce_id)
{
	return false;
}

static inline void
hif_trigger_fake_apps_resume(struct hif_opaque_softc *hif_ctx)
{
}

#endif /* End of WLAN_SUSPEND_RESUME_TEST */
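
/*
 * Unit-test flow sketch (assumed caller sequence, not enforced by this
 * file): with WLAN_SUSPEND_RESUME_TEST enabled,
 *
 *   hif_fake_apps_suspend(hif_ctx, cb);    after a normal WoW suspend
 *   ... wake interrupt arrives on the HTC_CTRL_RSVD_SVC pipe ...
 *   ce_dispatch_interrupt()
 *     -> hif_interrupt_is_fake_apps_resume() == true
 *     -> hif_trigger_fake_apps_resume()
 *          -> schedule_work(&resume_work)
 *               -> hif_fake_apps_resume_work() invokes cb(0)
 *   hif_fake_apps_resume(hif_ctx);         before the normal WoW resume
 */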

/**
 * hif_snoc_interrupt_handler() - top-half handler for snoc CE interrupts
 * @irq: irq number from the kernel
 * @context: the ce_tasklet_entry registered with the irq
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, else IRQ_NONE
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_print("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		qdf_print("CE id: %d", i);
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]: %d",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_print("%s", str_buffer);
	}
#undef STR_SIZE
}

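/*
 * Example output shape for hif_display_ce_stats() (counts are illustrative):
 * per-CPU counters for each CE are concatenated into a single line, e.g.
 *
 *   CE interrupt statistics:
 *   CE id: 0
 *   [0]: 120[1]: 7[2]: 0[3]: 0
 *
 * The snprintf() return-value checks guard against truncation once the
 * 128-byte buffer fills.
 */
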
/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_intr_stats));
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, else IRQ_NONE
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}
	hif_irq_disable(scn, ce_id);
	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_fake_apps_resume(hif_hdl, ce_id))) {
		hif_trigger_fake_apps_resume(hif_hdl);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}

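/*
 * Processing-context note for ce_dispatch_interrupt() above: when NAPI is
 * enabled for a CE, completions are polled from the NAPI context via
 * hif_napi_schedule(); otherwise the per-CE tasklet runs ce_tasklet().
 * In both cases the CE irq stays disabled (hif_irq_disable()) until the
 * bottom half finishes draining and re-enables it (ce_tasklet() does this
 * at exit; the NAPI poll path is expected to do the same in hif_napi).
 */
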
/**
 * ce_name - names of the copy engines, indexed by ce_id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_ce_free_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}