blob: df87f4bc09f06c1ee75aa099a485e5704aa5e8a4 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
Komal Seelamf8600682016-02-02 18:17:13 +05302 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080028#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/if_arp.h>
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053032#include "qdf_lock.h"
33#include "qdf_types.h"
34#include "qdf_status.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080035#include "regtable.h"
36#include "hif.h"
37#include "hif_io32.h"
38#include "ce_main.h"
39#include "ce_api.h"
40#include "ce_reg.h"
41#include "ce_internal.h"
42#ifdef CONFIG_CNSS
43#include <net/cnss.h>
Houston Hoffmanbc693492016-03-14 21:11:41 -070044#include "platform_icnss.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080045#endif
46#include "hif_debug.h"
47#include "hif_napi.h"
48
49
50/**
51 * ce_irq_status() - read CE IRQ status
Komal Seelam644263d2016-02-22 20:45:49 +053052 * @scn: struct hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080053 * @ce_id: ce_id
54 * @host_status: host_status
55 *
56 * Return: IRQ status
57 */
Komal Seelam644263d2016-02-22 20:45:49 +053058static inline void ce_irq_status(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080059 int ce_id, uint32_t *host_status)
60{
61 uint32_t offset = HOST_IS_ADDRESS + CE_BASE_ADDRESS(ce_id);
62
63 *host_status = hif_read32_mb(scn->mem + offset);
64}
65
/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 * @id: copy engine id whose tasklet this work item reschedules
 * @data: opaque HIF context (struct hif_softc *) used by the work handler
 * @work: embedded work_struct handed to the workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
77
78
79/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080080 * reschedule_ce_tasklet_work_handler() - reschedule work
Komal Seelamf8600682016-02-02 18:17:13 +053081 * @work: struct work_struct
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080082 *
83 * Return: N/A
84 */
Komal Seelamf8600682016-02-02 18:17:13 +053085static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080086{
Komal Seelamf8600682016-02-02 18:17:13 +053087 struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
88 work);
Komal Seelam644263d2016-02-22 20:45:49 +053089 struct hif_softc *scn = ce_work->data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080090 struct HIF_CE_state *hif_ce_state;
91
92 if (NULL == scn) {
93 HIF_ERROR("%s: tasklet scn is null", __func__);
94 return;
95 }
Komal Seelam02cf2f82016-02-22 20:44:25 +053096
97 hif_ce_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080098
99 if (scn->hif_init_done == false) {
100 HIF_ERROR("%s: wlan driver is unloaded", __func__);
101 return;
102 }
Komal Seelamf8600682016-02-02 18:17:13 +0530103 tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800104 return;
105}
106
/* Per-copy-engine work items used to reschedule CE tasklets via workqueue. */
static struct tasklet_work tasklet_workers[CE_ID_MAX];
/* Set true once init_tasklet_workers() has populated tasklet_workers[]. */
static bool work_initialized;
109
/**
 * init_tasklet_work() - initialize a work item with the given handler
 * @work: work_struct to initialize
 * @work_handler: handler the work item will execute
 *
 * With CONFIG_CNSS the platform's cnss_init_work() wrapper is used;
 * otherwise the work item is set up with the standard INIT_WORK().
 *
 * Return: N/A
 */
#ifdef CONFIG_CNSS
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	/* CNSS platform wrapper around work-item initialization */
	cnss_init_work(work, work_handler);
}
#else
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
#endif
130
131/**
132 * init_tasklet_workers() - init_tasklet_workers
Komal Seelamf8600682016-02-02 18:17:13 +0530133 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800134 *
135 * Return: N/A
136 */
Komal Seelam5584a7c2016-02-24 19:22:48 +0530137void init_tasklet_workers(struct hif_opaque_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800138{
139 uint32_t id;
140
141 for (id = 0; id < CE_ID_MAX; id++) {
142 tasklet_workers[id].id = id;
Komal Seelamf8600682016-02-02 18:17:13 +0530143 tasklet_workers[id].data = scn;
144 init_tasklet_work(&tasklet_workers[id].work,
145 reschedule_ce_tasklet_work_handler);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800146 }
147 work_initialized = true;
148}
149
#ifdef CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * SLUB-debug builds: defer through the per-CE work item instead of
 * re-arming the tasklet directly; the work handler
 * (reschedule_ce_tasklet_work_handler) then schedules the tasklet.
 * NOTE(review): presumably this moves the reschedule out of softirq
 * context to ease debug-build overhead — confirm original intent.
 * Logs an error instead if the work table was never initialized or the
 * ce_id is out of range.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			  __func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Non-SLUB-debug builds: re-arm the tasklet directly.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif
177
/**
 * ce_tasklet() - bottom-half processing for a copy engine interrupt
 * @data: struct ce_tasklet_entry for the copy engine being serviced
 *
 * Services completed descriptors for the CE, flushes LRO when a flush
 * callback is registered, and then either reschedules itself (when rx
 * frames are still pending) or re-enables the CE interrupt.
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	/* trace tasklet entry in the CE descriptor event history */
	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, 0);

	/* a CE tasklet must never run while the bus link is suspended */
	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
			  __func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	/* process completions for this copy engine */
	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (CE_state->lro_flush_cb != NULL) {
		/* flush rx aggregated (LRO) during servicing, if registered */
		CE_state->lro_flush_cb(CE_state->lro_data);
	}

	if (ce_check_rx_pending(scn, tasklet_entry->ce_id)) {
		/*
		 * There are frames pending, schedule tasklet to process them.
		 * Enable the interrupt only when there is no pending frames in
		 * any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, 0);
		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	/* re-arm the CE interrupt unless the target is in reset */
	if (scn->target_status != OL_TRGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0);

	/* balances the qdf_atomic_inc() done in ce_irq_handler() */
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
Komal Seelambd7c51d2016-02-24 10:27:30 +0530227
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800228/**
229 * ce_tasklet_init() - ce_tasklet_init
230 * @hif_ce_state: hif_ce_state
231 * @mask: mask
232 *
233 * Return: N/A
234 */
235void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
236{
237 int i;
238
239 for (i = 0; i < CE_COUNT_MAX; i++) {
240 if (mask & (1 << i)) {
241 hif_ce_state->tasklets[i].ce_id = i;
242 hif_ce_state->tasklets[i].inited = true;
243 hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
244 tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
245 ce_tasklet,
246 (unsigned long)&hif_ce_state->tasklets[i]);
247 }
248 }
249}
250/**
251 * ce_tasklet_kill() - ce_tasklet_kill
252 * @hif_ce_state: hif_ce_state
253 *
254 * Return: N/A
255 */
Komal Seelam644263d2016-02-22 20:45:49 +0530256void ce_tasklet_kill(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800257{
258 int i;
Komal Seelam644263d2016-02-22 20:45:49 +0530259 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800260
261 for (i = 0; i < CE_COUNT_MAX; i++)
262 if (hif_ce_state->tasklets[i].inited) {
263 tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
264 hif_ce_state->tasklets[i].inited = false;
265 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530266 qdf_atomic_set(&scn->active_tasklet_cnt, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800267}
/**
 * ce_irq_handler() - top-half handler for a copy engine interrupt
 * @irq: irq number that fired
 * @context: the struct ce_tasklet_entry registered for this irq
 *
 * Validates that the irq maps back to the expected copy engine, disables
 * the CE interrupt, reads the host IRQ status, and hands processing to
 * either the NAPI instance (when enabled for this CE) or the CE tasklet.
 *
 * Return: IRQ_HANDLED when the irq belongs to this CE, IRQ_NONE otherwise
 */
static irqreturn_t ce_irq_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t host_status;
	int ce_id = icnss_get_ce_id(irq);

	/* the irq must map back to the CE this entry was registered for */
	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	/* defensive range check (tasklet_entry->ce_id == ce_id here) */
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}
#ifndef HIF_PCI
	disable_irq_nosync(irq);
#endif
	hif_irq_disable(scn, ce_id);
	ce_irq_status(scn, ce_id, &host_status);
	/* decremented at the end of ce_tasklet(); NOTE(review): the NAPI
	 * path's balancing decrement is not visible in this file — confirm */
	qdf_atomic_inc(&scn->active_tasklet_cnt);
	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}
308
/**
 * ce_name - human-readable irq names, indexed by copy engine id
 *
 * Passed as the name argument to icnss_ce_request_irq() in
 * ce_register_irq() so each CE interrupt carries its own label.
 */
const char *ce_name[ICNSS_MAX_IRQ_REGISTRATIONS] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
328/**
329 * ce_unregister_irq() - ce_unregister_irq
330 * @hif_ce_state: hif_ce_state copy engine device handle
331 * @mask: which coppy engines to unregister for.
332 *
333 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
334 * unregister for copy engine x.
335 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530336 * Return: QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800337 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530338QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800339{
340 int id;
341 int ret;
342
343 if (hif_ce_state == NULL) {
344 HIF_WARN("%s: hif_ce_state = NULL", __func__);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530345 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800346 }
347 for (id = 0; id < CE_COUNT_MAX; id++) {
348 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
349 ret = icnss_ce_free_irq(id,
350 &hif_ce_state->tasklets[id]);
351 if (ret < 0)
352 HIF_ERROR(
353 "%s: icnss_unregister_irq error - ce_id = %d, ret = %d",
354 __func__, id, ret);
355 }
356 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530357 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800358}
359/**
360 * ce_register_irq() - ce_register_irq
361 * @hif_ce_state: hif_ce_state
362 * @mask: which coppy engines to unregister for.
363 *
364 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
365 * Register for copy engine x.
366 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530367 * Return: QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800368 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530369QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800370{
371 int id;
372 int ret;
373 unsigned long irqflags = IRQF_TRIGGER_RISING;
374 uint32_t done_mask = 0;
375
376 for (id = 0; id < CE_COUNT_MAX; id++) {
377 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
378 ret = icnss_ce_request_irq(id, ce_irq_handler,
379 irqflags, ce_name[id],
380 &hif_ce_state->tasklets[id]);
381 if (ret) {
382 HIF_ERROR(
383 "%s: cannot register CE %d irq handler, ret = %d",
384 __func__, id, ret);
385 ce_unregister_irq(hif_ce_state, done_mask);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530386 return QDF_STATUS_E_FAULT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800387 } else {
388 done_mask |= 1 << id;
389 }
390 }
391 }
392
393#ifndef HIF_PCI
394 /* move to hif_configure_irq */
Komal Seelam02cf2f82016-02-22 20:44:25 +0530395 ce_enable_irq_in_group_reg(HIF_GET_SOFTC(hif_ce_state), done_mask);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800396#endif
397
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530398 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800399}