blob: 5c692cc6a03dfe0ff027c94c7a10497e6f121b62 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
Komal Seelamf8600682016-02-02 18:17:13 +05302 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080028#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/if_arp.h>
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053032#include "qdf_lock.h"
33#include "qdf_types.h"
34#include "qdf_status.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080035#include "regtable.h"
36#include "hif.h"
37#include "hif_io32.h"
38#include "ce_main.h"
39#include "ce_api.h"
40#include "ce_reg.h"
41#include "ce_internal.h"
Houston Hoffman247f09b2016-04-06 21:21:40 -070042#include "ce_tasklet.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080043#ifdef CONFIG_CNSS
44#include <net/cnss.h>
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080045#endif
Houston Hoffman4849fcc2016-05-05 15:42:35 -070046#include "platform_icnss.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080047#include "hif_debug.h"
48#include "hif_napi.h"
49
50
/**
 * struct tasklet_work - deferred work item used to reschedule a CE tasklet
 * @id: copy engine id whose tasklet this work item reschedules
 * @data: opaque HIF context stored by init_tasklet_workers(); treated as
 *	  struct hif_softc * by the work handler
 * @work: work queue entry run by reschedule_ce_tasklet_work_handler()
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
62
63
64/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080065 * reschedule_ce_tasklet_work_handler() - reschedule work
Komal Seelamf8600682016-02-02 18:17:13 +053066 * @work: struct work_struct
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080067 *
68 * Return: N/A
69 */
Komal Seelamf8600682016-02-02 18:17:13 +053070static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080071{
Komal Seelamf8600682016-02-02 18:17:13 +053072 struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
73 work);
Komal Seelam644263d2016-02-22 20:45:49 +053074 struct hif_softc *scn = ce_work->data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080075 struct HIF_CE_state *hif_ce_state;
76
77 if (NULL == scn) {
78 HIF_ERROR("%s: tasklet scn is null", __func__);
79 return;
80 }
Komal Seelam02cf2f82016-02-22 20:44:25 +053081
82 hif_ce_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080083
84 if (scn->hif_init_done == false) {
85 HIF_ERROR("%s: wlan driver is unloaded", __func__);
86 return;
87 }
Komal Seelamf8600682016-02-02 18:17:13 +053088 tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080089 return;
90}
91
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080092static struct tasklet_work tasklet_workers[CE_ID_MAX];
93static bool work_initialized;
94
/**
 * init_tasklet_work() - initialize one tasklet work item
 * @work: work_struct to initialize
 * @work_handler: handler the work queue will invoke
 *
 * Thin wrapper around INIT_WORK().
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800107
108/**
109 * init_tasklet_workers() - init_tasklet_workers
Komal Seelamf8600682016-02-02 18:17:13 +0530110 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800111 *
112 * Return: N/A
113 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530114void init_tasklet_workers(struct hif_opaque_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800115{
116 uint32_t id;
117
118 for (id = 0; id < CE_ID_MAX; id++) {
119 tasklet_workers[id].id = id;
Komal Seelamf8600682016-02-02 18:17:13 +0530120 tasklet_workers[id].data = scn;
121 init_tasklet_work(&tasklet_workers[id].work,
122 reschedule_ce_tasklet_work_handler);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800123 }
124 work_initialized = true;
125}
126
#ifdef CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * On CONFIG_SLUB_DEBUG_ON builds the tasklet is bounced through the
 * system work queue rather than scheduled directly — presumably to keep
 * softirq time bounded under SLUB debug overhead; TODO confirm rationale.
 * Falls back to an error log if the workers were never initialized or
 * the ce_id is out of range.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			  __func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Non-debug builds reschedule the tasklet directly.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif
154
/**
 * ce_tasklet() - softirq handler servicing one copy engine
 * @data: pointer to this CE's struct ce_tasklet_entry, cast to
 *	  unsigned long by tasklet_init()
 *
 * Services the copy engine, flushes LRO when a flush callback is
 * registered, reschedules itself while RX frames remain pending, and
 * re-enables the CE interrupt only when no work is left.
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0);

	/* A tasklet firing while the link is suspended is a bug. */
	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (CE_state->lro_flush_cb != NULL) {
		CE_state->lro_flush_cb(CE_state->lro_data);
	}

	if (ce_check_rx_pending(CE_state)) {
		/*
		 * There are frames pending, schedule tasklet to process them.
		 * Enable the interrupt only when there is no pending frames in
		 * any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0);
		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	/* Leave the interrupt masked while the target is in reset. */
	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0);

	/* Balances the qdf_atomic_inc() in ce_dispatch_interrupt(). */
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
Komal Seelambd7c51d2016-02-24 10:27:30 +0530204
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800205/**
206 * ce_tasklet_init() - ce_tasklet_init
207 * @hif_ce_state: hif_ce_state
208 * @mask: mask
209 *
210 * Return: N/A
211 */
212void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
213{
214 int i;
215
216 for (i = 0; i < CE_COUNT_MAX; i++) {
217 if (mask & (1 << i)) {
218 hif_ce_state->tasklets[i].ce_id = i;
219 hif_ce_state->tasklets[i].inited = true;
220 hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
221 tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
222 ce_tasklet,
223 (unsigned long)&hif_ce_state->tasklets[i]);
224 }
225 }
226}
227/**
228 * ce_tasklet_kill() - ce_tasklet_kill
229 * @hif_ce_state: hif_ce_state
230 *
231 * Return: N/A
232 */
Komal Seelam644263d2016-02-22 20:45:49 +0530233void ce_tasklet_kill(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800234{
235 int i;
Komal Seelam644263d2016-02-22 20:45:49 +0530236 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800237
238 for (i = 0; i < CE_COUNT_MAX; i++)
239 if (hif_ce_state->tasklets[i].inited) {
240 tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
241 hif_ce_state->tasklets[i].inited = false;
242 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530243 qdf_atomic_set(&scn->active_tasklet_cnt, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800244}
Houston Hoffman247f09b2016-04-06 21:21:40 -0700245
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800246/**
Houston Hoffman247f09b2016-04-06 21:21:40 -0700247 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
248 * @irq: irq coming from kernel
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800249 * @context: context
250 *
251 * Return: N/A
252 */
Houston Hoffman247f09b2016-04-06 21:21:40 -0700253static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800254{
255 struct ce_tasklet_entry *tasklet_entry = context;
Houston Hoffman247f09b2016-04-06 21:21:40 -0700256 return ce_dispatch_interrupt(icnss_get_ce_id(irq), tasklet_entry);
257}
258
259/**
260 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
261 * @ce_id: ce_id
262 * @tasklet_entry: context
263 *
264 * Return: N/A
265 */
266irqreturn_t ce_dispatch_interrupt(int ce_id,
267 struct ce_tasklet_entry *tasklet_entry)
268{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800269 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
Komal Seelam644263d2016-02-22 20:45:49 +0530270 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
Komal Seelam5584a7c2016-02-24 19:22:48 +0530271 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800272
273 if (tasklet_entry->ce_id != ce_id) {
274 HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
275 __func__, tasklet_entry->ce_id, ce_id);
276 return IRQ_NONE;
277 }
Orhan K AKYILDIZ06158912015-11-11 18:01:15 -0800278 if (unlikely(ce_id >= CE_COUNT_MAX)) {
279 HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
280 __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
281 return IRQ_NONE;
282 }
Houston Hoffman8f239f62016-03-14 21:12:05 -0700283 hif_irq_disable(scn, ce_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530284 qdf_atomic_inc(&scn->active_tasklet_cnt);
Komal Seelambd7c51d2016-02-24 10:27:30 +0530285 hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
Komal Seelam644263d2016-02-22 20:45:49 +0530286 if (hif_napi_enabled(hif_hdl, ce_id))
287 hif_napi_schedule(hif_hdl, ce_id);
Houston Hoffmane9afdc12015-11-18 19:41:52 -0800288 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800289 tasklet_schedule(&tasklet_entry->intr_tq);
Houston Hoffmane9afdc12015-11-18 19:41:52 -0800290
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800291 return IRQ_HANDLED;
292}
293
/**
 * ce_name - human readable irq names, indexed by copy engine id
 *
 * Passed as the irq name when registering CE interrupts in
 * ce_register_irq().
 */
const char *ce_name[ICNSS_MAX_IRQ_REGISTRATIONS] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
313/**
314 * ce_unregister_irq() - ce_unregister_irq
315 * @hif_ce_state: hif_ce_state copy engine device handle
316 * @mask: which coppy engines to unregister for.
317 *
318 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
319 * unregister for copy engine x.
320 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530321 * Return: QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800322 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530323QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800324{
325 int id;
Houston Hoffmand6f946c2016-04-06 15:16:00 -0700326 int ce_count = HIF_GET_SOFTC(hif_ce_state)->ce_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800327 int ret;
328
329 if (hif_ce_state == NULL) {
330 HIF_WARN("%s: hif_ce_state = NULL", __func__);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530331 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800332 }
Houston Hoffmand6f946c2016-04-06 15:16:00 -0700333 for (id = 0; id < ce_count; id++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800334 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
335 ret = icnss_ce_free_irq(id,
336 &hif_ce_state->tasklets[id]);
337 if (ret < 0)
338 HIF_ERROR(
339 "%s: icnss_unregister_irq error - ce_id = %d, ret = %d",
340 __func__, id, ret);
341 }
342 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530343 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800344}
345/**
346 * ce_register_irq() - ce_register_irq
347 * @hif_ce_state: hif_ce_state
348 * @mask: which coppy engines to unregister for.
349 *
350 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
351 * Register for copy engine x.
352 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530353 * Return: QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800354 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530355QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800356{
357 int id;
Houston Hoffmand6f946c2016-04-06 15:16:00 -0700358 int ce_count = HIF_GET_SOFTC(hif_ce_state)->ce_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800359 int ret;
360 unsigned long irqflags = IRQF_TRIGGER_RISING;
361 uint32_t done_mask = 0;
362
Houston Hoffmand6f946c2016-04-06 15:16:00 -0700363 for (id = 0; id < ce_count; id++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800364 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
Houston Hoffman247f09b2016-04-06 21:21:40 -0700365 ret = icnss_ce_request_irq(id,
366 hif_snoc_interrupt_handler,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800367 irqflags, ce_name[id],
368 &hif_ce_state->tasklets[id]);
369 if (ret) {
370 HIF_ERROR(
371 "%s: cannot register CE %d irq handler, ret = %d",
372 __func__, id, ret);
373 ce_unregister_irq(hif_ce_state, done_mask);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530374 return QDF_STATUS_E_FAULT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800375 } else {
376 done_mask |= 1 << id;
377 }
378 }
379 }
380
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530381 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800382}