blob: 0601b75d0cfa46ad50a0e23a97f4b1d7037b4350 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
Prashanth Bhattadfcae6b2015-12-04 11:56:47 -08002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080028#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/if_arp.h>
32#ifdef CONFIG_PCI_MSM
33#include <linux/msm_pcie.h>
34#endif
35#include "hif_io32.h"
36#include "if_pci.h"
37#include "hif.h"
38#include "hif_main.h"
Houston Hoffman63777f22016-03-14 21:11:49 -070039#include "ce_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "ce_api.h"
41#include "ce_internal.h"
42#include "ce_reg.h"
Houston Hoffman108da402016-03-14 21:11:24 -070043#include "ce_bmi.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044#include "regtable.h"
Houston Hoffmanec93ab02016-05-03 20:09:55 -070045#include "hif_hw_version.h"
Houston Hoffman62aa58d2015-11-02 21:14:55 -080046#include <linux/debugfs.h>
47#include <linux/seq_file.h>
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053048#include "qdf_status.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053049#include "qdf_atomic.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080050#ifdef CONFIG_CNSS
51#include <net/cnss.h>
52#else
53#include "cnss_stub.h"
54#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080055#include "mp_dev.h"
56#include "hif_debug.h"
57
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080058#include "if_pci_internal.h"
59#include "icnss_stub.h"
60#include "ce_tasklet.h"
Houston Hoffmanf303f912016-03-14 21:11:42 -070061#include "targaddrs.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080062
Houston Hoffman32bc8eb2016-03-14 21:11:34 -070063#include "pci_api.h"
64
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080065/* Maximum ms timeout for host to wake up target */
66#define PCIE_WAKE_TIMEOUT 1000
67#define RAMDUMP_EVENT_TIMEOUT 2500
68
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080069/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
70 * PCIe data bus error
71 * As workaround for this issue - changing the reset sequence to
72 * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET
73 */
74#define CPU_WARM_RESET_WAR
Houston Hoffmanfb698ef2016-05-05 19:50:44 -070075
76#ifdef CONFIG_WIN
77extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
78#endif
79
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080080/*
81 * Top-level interrupt handler for all PCI interrupts from a Target.
82 * When a block of MSI interrupts is allocated, this top-level handler
83 * is not used; instead, we directly call the correct sub-handler.
84 */
85struct ce_irq_reg_table {
86 uint32_t irq_enable;
87 uint32_t irq_status;
88};
89
Houston Hoffman06bc4f52015-12-16 18:43:34 -080090#if !defined(QCA_WIFI_3_0_ADRASTEA)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080091static inline void cnss_intr_notify_q6(void)
92{
93}
94#endif
95
Houston Hoffman06bc4f52015-12-16 18:43:34 -080096#if !defined(QCA_WIFI_3_0_ADRASTEA)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080097static inline void *cnss_get_target_smem(void)
98{
99 return NULL;
100}
101#endif
102
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800103#ifndef QCA_WIFI_3_0_ADRASTEA
104static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
105{
106 return;
107}
108#else
109void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
110{
Komal Seelam644263d2016-02-22 20:45:49 +0530111 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800112 unsigned int target_enable0, target_enable1;
113 unsigned int target_cause0, target_cause1;
114
115 target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
116 target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
117 target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
118 target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);
119
120 if ((target_enable0 & target_cause0) ||
121 (target_enable1 & target_cause1)) {
122 hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
123 hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);
124
125 if (scn->notice_send)
126 cnss_intr_notify_q6();
127 }
128}
129#endif
130
Houston Hoffman247f09b2016-04-06 21:21:40 -0700131/**
132 * pci_dispatch_ce_irq() - pci_dispatch_ce_irq
133 * @scn: scn
134 *
135 * Return: N/A
136 */
137static void pci_dispatch_interrupt(struct hif_softc *scn)
138{
139 uint32_t intr_summary;
140 int id;
141 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
142
143 if (scn->hif_init_done != true)
144 return;
145
146 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
147 return;
148
149 intr_summary = CE_INTERRUPT_SUMMARY(scn);
150
151 if (intr_summary == 0) {
Komal Seelam6ee55902016-04-11 17:11:07 +0530152 if ((scn->target_status != TARGET_STATUS_RESET) &&
Houston Hoffman247f09b2016-04-06 21:21:40 -0700153 (!qdf_atomic_read(&scn->link_suspended))) {
154
155 hif_write32_mb(scn->mem +
156 (SOC_CORE_BASE_ADDRESS |
157 PCIE_INTR_ENABLE_ADDRESS),
158 HOST_GROUP0_MASK);
159
160 hif_read32_mb(scn->mem +
161 (SOC_CORE_BASE_ADDRESS |
162 PCIE_INTR_ENABLE_ADDRESS));
163 }
164 Q_TARGET_ACCESS_END(scn);
165 return;
166 } else {
167 Q_TARGET_ACCESS_END(scn);
168 }
169
170 scn->ce_irq_summary = intr_summary;
171 for (id = 0; intr_summary && (id < scn->ce_count); id++) {
172 if (intr_summary & (1 << id)) {
173 intr_summary &= ~(1 << id);
174 ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
175 }
176 }
177}
178
Houston Hoffman3db96a42016-05-05 19:54:39 -0700179irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800180{
181 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
Komal Seelam644263d2016-02-22 20:45:49 +0530182 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Komal Seelam02cf2f82016-02-22 20:44:25 +0530183 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800184 volatile int tmp;
185 uint16_t val;
186 uint32_t bar0;
187 uint32_t fw_indicator_address, fw_indicator;
188 bool ssr_irq = false;
189 unsigned int host_cause, host_enable;
190
191 if (LEGACY_INTERRUPTS(sc)) {
192 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
193 return IRQ_HANDLED;
194
195 if (ADRASTEA_BU) {
196 host_enable = hif_read32_mb(sc->mem +
197 PCIE_INTR_ENABLE_ADDRESS);
198 host_cause = hif_read32_mb(sc->mem +
199 PCIE_INTR_CAUSE_ADDRESS);
200 if (!(host_enable & host_cause)) {
201 hif_pci_route_adrastea_interrupt(sc);
202 return IRQ_HANDLED;
203 }
204 }
205
206 /* Clear Legacy PCI line interrupts
207 * IMPORTANT: INTR_CLR regiser has to be set
208 * after INTR_ENABLE is set to 0,
209 * otherwise interrupt can not be really cleared */
210 hif_write32_mb(sc->mem +
211 (SOC_CORE_BASE_ADDRESS |
212 PCIE_INTR_ENABLE_ADDRESS), 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800213
214 hif_write32_mb(sc->mem +
215 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
216 ADRASTEA_BU ?
217 (host_enable & host_cause) :
218 HOST_GROUP0_MASK);
219
220 if (ADRASTEA_BU)
221 hif_write32_mb(sc->mem + 0x2f100c , (host_cause >> 1));
222
223 /* IMPORTANT: this extra read transaction is required to
224 * flush the posted write buffer */
225 if (!ADRASTEA_BU) {
226 tmp =
227 hif_read32_mb(sc->mem +
228 (SOC_CORE_BASE_ADDRESS |
229 PCIE_INTR_ENABLE_ADDRESS));
230
231 if (tmp == 0xdeadbeef) {
232 HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
233 __func__);
234
235 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
236 HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
237 __func__, val);
238
239 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
240 HIF_ERROR("%s: PCI Device ID = 0x%04x",
241 __func__, val);
242
243 pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
244 HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
245 val);
246
247 pci_read_config_word(sc->pdev, PCI_STATUS, &val);
248 HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
249 val);
250
251 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
252 &bar0);
253 HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
254 bar0);
255
256 HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
257 __func__,
258 hif_read32_mb(sc->mem +
259 PCIE_LOCAL_BASE_ADDRESS
260 + RTC_STATE_ADDRESS));
261 HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
262 __func__,
263 hif_read32_mb(sc->mem +
264 PCIE_LOCAL_BASE_ADDRESS
265 + PCIE_SOC_WAKE_ADDRESS));
266 HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
267 __func__,
268 hif_read32_mb(sc->mem + 0x80008),
269 hif_read32_mb(sc->mem + 0x8000c));
270 HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
271 __func__,
272 hif_read32_mb(sc->mem + 0x80010),
273 hif_read32_mb(sc->mem + 0x80014));
274 HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
275 __func__,
276 hif_read32_mb(sc->mem + 0x80018),
277 hif_read32_mb(sc->mem + 0x8001c));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530278 QDF_BUG(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800279 }
280
281 PCI_CLR_CAUSE0_REGISTER(sc);
282 }
283
284 if (HAS_FW_INDICATOR) {
285 fw_indicator_address = hif_state->fw_indicator_address;
286 fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
287 if ((fw_indicator != ~0) &&
288 (fw_indicator & FW_IND_EVENT_PENDING))
289 ssr_irq = true;
290 }
291
292 if (Q_TARGET_ACCESS_END(scn) < 0)
293 return IRQ_HANDLED;
294 }
295 /* TBDXXX: Add support for WMAC */
296
297 if (ssr_irq) {
298 sc->irq_event = irq;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530299 qdf_atomic_set(&scn->tasklet_from_intr, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800300
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530301 qdf_atomic_inc(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800302 tasklet_schedule(&sc->intr_tq);
303 } else {
Houston Hoffman247f09b2016-04-06 21:21:40 -0700304 pci_dispatch_interrupt(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800305 }
306
307 return IRQ_HANDLED;
308}
309
310static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
311{
312 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
313
Komal Seelam02cf2f82016-02-22 20:44:25 +0530314 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800315
316 return IRQ_HANDLED;
317}
318
Komal Seelam644263d2016-02-22 20:45:49 +0530319bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800320{
321 return 1; /* FIX THIS */
322}
323
324/**
325 * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep
Komal Seelam644263d2016-02-22 20:45:49 +0530326 * @scn: hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800327 *
328 * Return: void
329 */
330#if CONFIG_ATH_PCIE_MAX_PERF == 0
Komal Seelam644263d2016-02-22 20:45:49 +0530331void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800332{
Komal Seelam02cf2f82016-02-22 20:44:25 +0530333 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800334 A_target_id_t pci_addr = scn->mem;
335
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530336 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800337 /*
338 * If the deferred sleep timer is running cancel it
339 * and put the soc into sleep.
340 */
341 if (hif_state->fake_sleep == true) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530342 qdf_timer_stop(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800343 if (hif_state->verified_awake == false) {
344 hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
345 PCIE_SOC_WAKE_ADDRESS,
346 PCIE_SOC_WAKE_RESET);
347 }
348 hif_state->fake_sleep = false;
349 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530350 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800351}
352#else
Komal Seelam644263d2016-02-22 20:45:49 +0530353inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800354{
355 return;
356}
357#endif
358
/* Accessors for 32-bit registers in the PCIe local register block
 * (offsets relative to PCIE_LOCAL_BASE_ADDRESS within the mapped BAR). */
#define A_PCIE_LOCAL_REG_READ(mem, addr) \
	hif_read32_mb((char *)(mem) + \
		      PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
	hif_write32_mb(((char *)(mem) + \
			PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
366
Houston Hoffman00d42ae2016-03-14 21:11:47 -0700367#ifdef QCA_WIFI_3_0
368/**
369 * hif_targ_is_awake() - check to see if the target is awake
370 * @hif_ctx: hif context
371 *
372 * emulation never goes to sleep
373 *
374 * Return: true if target is awake
375 */
376bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
377{
378 return true;
379}
380#else
381/**
382 * hif_targ_is_awake() - check to see if the target is awake
383 * @hif_ctx: hif context
384 *
385 * Return: true if the targets clocks are on
386 */
387bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
388{
389 uint32_t val;
390
391 if (scn->recovery)
392 return false;
393 val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
394 + RTC_STATE_ADDRESS);
Houston Hoffmanf241eb02016-05-10 17:07:36 -0700395 return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
Houston Hoffman00d42ae2016-03-14 21:11:47 -0700396}
397#endif
398
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800399#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
400static void hif_pci_device_reset(struct hif_pci_softc *sc)
401{
402 void __iomem *mem = sc->mem;
403 int i;
404 uint32_t val;
Komal Seelam644263d2016-02-22 20:45:49 +0530405 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800406
407 if (!scn->hostdef)
408 return;
409
410 /* NB: Don't check resetok here. This form of reset
411 * is integral to correct operation. */
412
413 if (!SOC_GLOBAL_RESET_ADDRESS) {
414 return;
415 }
416
417 if (!mem) {
418 return;
419 }
420
421 HIF_ERROR("%s: Reset Device", __func__);
422
423 /*
424 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
425 * writing WAKE_V, the Target may scribble over Host memory!
426 */
427 A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
428 PCIE_SOC_WAKE_V_MASK);
429 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
430 if (hif_targ_is_awake(scn, mem))
431 break;
432
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530433 qdf_mdelay(1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800434 }
435
436 /* Put Target, including PCIe, into RESET. */
437 val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
438 val |= 1;
439 A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
440 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
441 if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
442 RTC_STATE_COLD_RESET_MASK)
443 break;
444
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530445 qdf_mdelay(1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800446 }
447
448 /* Pull Target, including PCIe, out of RESET. */
449 val &= ~1;
450 A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
451 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
452 if (!
453 (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
454 RTC_STATE_COLD_RESET_MASK))
455 break;
456
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530457 qdf_mdelay(1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800458 }
459
460 A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
461}
462
/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS -so Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here. This form of reset is
	 * integral to correct operation. */

	if (!mem) {
		return;
	}

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	/* disable host-side interrupt enables and clear group 0 */
	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb((mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS +
			       SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
		     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(mem +
		       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		       val);

	/* Reset CE */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb((mem +
			(RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		       val);
	/* read back, then let the CEs settle in reset */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		     __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		     __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);

}
592
#ifndef QCA_WIFI_3_0
/**
 * hif_check_fw_reg() - check the firmware indicator register
 * @hif_ctx: hif context
 *
 * Reads FW_INDICATOR_ADDRESS under a target-access window.
 *
 * NOTE(review): on a failed Q_TARGET_ACCESS_BEGIN/END this returns
 * ATH_ISR_NOSCHED/ATH_ISR_SCHED rather than the 0/1 convention used
 * below -- confirm what callers expect for these error paths.
 *
 * Return: 0 if FW_IND_HELPER is set, 1 otherwise
 */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif
615
Komal Seelam5584a7c2016-02-24 19:22:48 +0530616int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800617{
Komal Seelam644263d2016-02-22 20:45:49 +0530618 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800619 uint16_t device_id;
620 uint32_t val;
621 uint16_t timeout_count = 0;
Komal Seelam02cf2f82016-02-22 20:44:25 +0530622 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800623
624 /* Check device ID from PCIe configuration space for link status */
625 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
626 if (device_id != sc->devid) {
627 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
628 __func__, device_id, sc->devid);
629 return -EACCES;
630 }
631
632 /* Check PCIe local register for bar/memory access */
633 val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
634 RTC_STATE_ADDRESS);
635 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
636
637 /* Try to wake up taget if it sleeps */
638 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
639 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
640 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
641 hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
642 PCIE_SOC_WAKE_ADDRESS));
643
644 /* Check if taget can be woken up */
645 while (!hif_targ_is_awake(scn, sc->mem)) {
646 if (timeout_count >= PCIE_WAKE_TIMEOUT) {
647 HIF_ERROR("%s: wake up timeout, %08x, %08x",
648 __func__,
649 hif_read32_mb(sc->mem +
650 PCIE_LOCAL_BASE_ADDRESS +
651 RTC_STATE_ADDRESS),
652 hif_read32_mb(sc->mem +
653 PCIE_LOCAL_BASE_ADDRESS +
654 PCIE_SOC_WAKE_ADDRESS));
655 return -EACCES;
656 }
657
658 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
659 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
660
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530661 qdf_mdelay(100);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800662 timeout_count += 100;
663 }
664
665 /* Check Power register for SoC internal bus issues */
666 val =
667 hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
668 SOC_POWER_REG_OFFSET);
669 HIF_INFO_MED("%s: Power register is %08x", __func__, val);
670
671 return 0;
672}
673
Govind Singh2443fb32016-01-13 17:44:48 +0530674/**
Houston Hoffman3c017e72016-03-14 21:12:11 -0700675 * __hif_pci_dump_registers(): dump other PCI debug registers
Komal Seelam5584a7c2016-02-24 19:22:48 +0530676 * @scn: struct hif_softc
Govind Singh2443fb32016-01-13 17:44:48 +0530677 *
Houston Hoffman3c017e72016-03-14 21:12:11 -0700678 * This function dumps pci debug registers. The parrent function
679 * dumps the copy engine registers before calling this function.
Govind Singh2443fb32016-01-13 17:44:48 +0530680 *
681 * Return: void
682 */
Houston Hoffman3c017e72016-03-14 21:12:11 -0700683static void __hif_pci_dump_registers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800684{
Komal Seelam02cf2f82016-02-22 20:44:25 +0530685 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800686 void __iomem *mem = sc->mem;
687 uint32_t val, i, j;
688 uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
689 uint32_t ce_base;
690
Houston Hoffmanbac94542016-03-14 21:11:59 -0700691 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
692 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800693
694 /* DEBUG_INPUT_SEL_SRC = 0x6 */
695 val =
696 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
697 WLAN_DEBUG_INPUT_SEL_OFFSET);
698 val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
699 val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
700 hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
701 val);
702
703 /* DEBUG_CONTROL_ENABLE = 0x1 */
704 val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
705 WLAN_DEBUG_CONTROL_OFFSET);
706 val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
707 val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
708 hif_write32_mb(mem + GPIO_BASE_ADDRESS +
709 WLAN_DEBUG_CONTROL_OFFSET, val);
710
711 HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
712 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
713 WLAN_DEBUG_INPUT_SEL_OFFSET),
714 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
715 WLAN_DEBUG_CONTROL_OFFSET));
716
717 HIF_INFO_MED("%s: Debug CE", __func__);
718 /* Loop CE debug output */
719 /* AMBA_DEBUG_BUS_SEL = 0xc */
720 val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
721 val &= ~AMBA_DEBUG_BUS_SEL_MASK;
722 val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
723 hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
724
725 for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
726 /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
727 val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
728 CE_WRAPPER_DEBUG_OFFSET);
729 val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
730 val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
731 hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
732 CE_WRAPPER_DEBUG_OFFSET, val);
733
734 HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
735 __func__, wrapper_idx[i],
736 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
737 AMBA_DEBUG_BUS_OFFSET),
738 hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
739 CE_WRAPPER_DEBUG_OFFSET));
740
741 if (wrapper_idx[i] <= 7) {
742 for (j = 0; j <= 5; j++) {
743 ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
744 /* For (j=0~5) write CE_DEBUG_SEL = j */
745 val =
746 hif_read32_mb(mem + ce_base +
747 CE_DEBUG_OFFSET);
748 val &= ~CE_DEBUG_SEL_MASK;
749 val |= CE_DEBUG_SEL_SET(j);
750 hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
751 val);
752
753 /* read (@gpio_athr_wlan_reg)
754 * WLAN_DEBUG_OUT_DATA */
755 val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
756 WLAN_DEBUG_OUT_OFFSET);
757 val = WLAN_DEBUG_OUT_DATA_GET(val);
758
759 HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
760 __func__, j,
761 hif_read32_mb(mem + ce_base +
762 CE_DEBUG_OFFSET), val);
763 }
764 } else {
765 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
766 val =
767 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
768 WLAN_DEBUG_OUT_OFFSET);
769 val = WLAN_DEBUG_OUT_DATA_GET(val);
770
771 HIF_INFO_MED("%s: out: %x", __func__, val);
772 }
773 }
774
775 HIF_INFO_MED("%s: Debug PCIe:", __func__);
776 /* Loop PCIe debug output */
777 /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
778 val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
779 val &= ~AMBA_DEBUG_BUS_SEL_MASK;
780 val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
781 hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
782
783 for (i = 0; i <= 8; i++) {
784 /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
785 val =
786 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
787 AMBA_DEBUG_BUS_OFFSET);
788 val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
789 val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
790 hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
791 val);
792
793 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
794 val =
795 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
796 WLAN_DEBUG_OUT_OFFSET);
797 val = WLAN_DEBUG_OUT_DATA_GET(val);
798
799 HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
800 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
801 WLAN_DEBUG_OUT_OFFSET), val,
802 hif_read32_mb(mem + GPIO_BASE_ADDRESS +
803 WLAN_DEBUG_OUT_OFFSET));
804 }
805
Houston Hoffmanbac94542016-03-14 21:11:59 -0700806 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800807}
808
Govind Singh2443fb32016-01-13 17:44:48 +0530809/**
810 * hif_dump_registers(): dump bus debug registers
Komal Seelam5584a7c2016-02-24 19:22:48 +0530811 * @scn: struct hif_opaque_softc
Govind Singh2443fb32016-01-13 17:44:48 +0530812 *
813 * This function dumps hif bus debug registers
814 *
815 * Return: 0 for success or error code
816 */
Houston Hoffman3c017e72016-03-14 21:12:11 -0700817int hif_pci_dump_registers(struct hif_softc *hif_ctx)
Govind Singh2443fb32016-01-13 17:44:48 +0530818{
819 int status;
Komal Seelam644263d2016-02-22 20:45:49 +0530820 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Govind Singh2443fb32016-01-13 17:44:48 +0530821
822 status = hif_dump_ce_registers(scn);
823
824 if (status)
825 HIF_ERROR("%s: Dump CE Registers Failed", __func__);
826
Houston Hoffman3c017e72016-03-14 21:12:11 -0700827 /* dump non copy engine pci registers */
828 __hif_pci_dump_registers(scn);
Govind Singh2443fb32016-01-13 17:44:48 +0530829
830 return 0;
831}
832
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800833/*
834 * Handler for a per-engine interrupt on a PARTICULAR CE.
835 * This is used in cases where each CE has a private
836 * MSI interrupt.
837 */
838static irqreturn_t ce_per_engine_handler(int irq, void *arg)
839{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800840 int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
841
842 /*
843 * NOTE: We are able to derive CE_id from irq because we
844 * use a one-to-one mapping for CE's 0..5.
845 * CE's 6 & 7 do not use interrupts at all.
846 *
847 * This mapping must be kept in sync with the mapping
848 * used by firmware.
849 */
850
Komal Seelam02cf2f82016-02-22 20:44:25 +0530851 ce_per_engine_service(arg, CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800852
853 return IRQ_HANDLED;
854}
855
#ifdef CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
		return;
	}

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}

	tasklet_schedule(&sc->intr_tq);
	return;
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * NOTE(review): the work is created with a NULL context argument while
 * the handler dereferences its arg as a hif_pci_softc -- confirm how
 * qdf_create_work binds the handler argument.
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
	qdf_create_work(0, &sc->reschedule_tasklet_work,
			reschedule_tasklet_work_handler, NULL);
}
#else
/* Non-SLUB-debug builds: no reschedule work needed. */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* CONFIG_SLUB_DEBUG_ON */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800893
Houston Hoffman3db96a42016-05-05 19:54:39 -0700894void wlan_tasklet(unsigned long data)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800895{
896 struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
Komal Seelam644263d2016-02-22 20:45:49 +0530897 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800898
899 if (scn->hif_init_done == false)
900 goto end;
901
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530902 if (qdf_atomic_read(&scn->link_suspended))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800903 goto end;
904
Houston Hoffman06bc4f52015-12-16 18:43:34 -0800905 if (!ADRASTEA_BU) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800906 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
Komal Seelam6ee55902016-04-11 17:11:07 +0530907 if (scn->target_status == TARGET_STATUS_RESET)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800908 goto end;
909 }
910
911end:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530912 qdf_atomic_set(&scn->tasklet_from_intr, 0);
913 qdf_atomic_dec(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800914}
915
Houston Hoffman62aa58d2015-11-02 21:14:55 -0800916#ifdef FEATURE_RUNTIME_PM
917#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
918 seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
919
920/**
921 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
922 * @sc: hif_pci_softc context
923 * @msg: log message
924 *
925 * log runtime pm stats when something seems off.
926 *
927 * Return: void
928 */
929void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
930{
931 struct hif_pm_runtime_lock *ctx;
932
933 HIF_ERROR("%s: usage_count: %d, pm_state: %d, prevent_suspend_cnt: %d",
934 msg, atomic_read(&sc->dev->power.usage_count),
935 atomic_read(&sc->pm_state),
936 sc->prevent_suspend_cnt);
937
938 HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
939 sc->dev->power.runtime_status,
940 sc->dev->power.runtime_error,
941 sc->dev->power.disable_depth,
942 sc->dev->power.autosuspend_delay);
943
944 HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
945 sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
946 sc->pm_stats.request_resume);
947
948 HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
949 sc->pm_stats.allow_suspend,
950 sc->pm_stats.prevent_suspend);
951
952 HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
953 sc->pm_stats.prevent_suspend_timeout,
954 sc->pm_stats.allow_suspend_timeout);
955
956 HIF_ERROR("Suspended: %u, resumed: %u count",
957 sc->pm_stats.suspended,
958 sc->pm_stats.resumed);
959
960 HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
961 sc->pm_stats.suspend_err,
962 sc->pm_stats.runtime_get_err);
963
964 HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
965
966 list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
967 HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
968 }
969
970 WARN_ON(1);
971}
972
973/**
974 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
975 * @s: file to print to
976 * @data: unused
977 *
978 * debugging tool added to the debug fs for displaying runtimepm stats
979 *
980 * Return: 0
981 */
982static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
983{
984 struct hif_pci_softc *sc = s->private;
985 static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
986 "SUSPENDED"};
987 unsigned int msecs_age;
988 int pm_state = atomic_read(&sc->pm_state);
989 unsigned long timer_expires, flags;
990 struct hif_pm_runtime_lock *ctx;
991
992 seq_printf(s, "%30s: %s\n", "Runtime PM state",
993 autopm_state[pm_state]);
994 seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
995 sc->pm_stats.last_resume_caller);
996
997 if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
998 msecs_age = jiffies_to_msecs(
999 jiffies - sc->pm_stats.suspend_jiffies);
1000 seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1001 msecs_age / 1000, msecs_age % 1000);
1002 }
1003
1004 seq_printf(s, "%30s: %d\n", "PM Usage count",
1005 atomic_read(&sc->dev->power.usage_count));
1006
1007 seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1008 sc->prevent_suspend_cnt);
1009
1010 HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1011 HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1012 HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1013 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1014 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1015 HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1016 HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1017 HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1018 HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1019 HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1020 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1021
1022 timer_expires = sc->runtime_timer_expires;
1023 if (timer_expires > 0) {
1024 msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1025 seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1026 msecs_age / 1000, msecs_age % 1000);
1027 }
1028
1029 spin_lock_irqsave(&sc->runtime_lock, flags);
1030 if (list_empty(&sc->prevent_suspend_list)) {
1031 spin_unlock_irqrestore(&sc->runtime_lock, flags);
1032 return 0;
1033 }
1034
1035 seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1036 list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1037 seq_printf(s, "%s", ctx->name);
1038 if (ctx->timeout)
1039 seq_printf(s, "(%d ms)", ctx->timeout);
1040 seq_puts(s, " ");
1041 }
1042 seq_puts(s, "\n");
1043 spin_unlock_irqrestore(&sc->runtime_lock, flags);
1044
1045 return 0;
1046}
1047#undef HIF_PCI_RUNTIME_PM_STATS
1048
1049/**
1050 * hif_pci_autopm_open() - open a debug fs file to access the runtime pm stats
1051 * @inode
1052 * @file
1053 *
1054 * Return: linux error code of single_open.
1055 */
1056static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1057{
1058 return single_open(file, hif_pci_pm_runtime_debugfs_show,
1059 inode->i_private);
1060}
1061
#ifdef WLAN_OPEN_SOURCE
/* read-only seq_file interface backed by hif_pci_pm_runtime_debugfs_show */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner		= THIS_MODULE,
	.open		= hif_pci_runtime_pm_open,
	.release	= single_release,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry (root-readable "cnss_runtime_pm") to debug
 * the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					S_IRUSR, NULL, sc,
					&hif_pci_runtime_pm_fops);
}
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}
#else
/* no debugfs interface in non-open-source builds */
static inline void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
}
static inline void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
}
#endif
1101
Houston Hoffman9078a152015-11-02 16:15:02 -08001102static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001103
1104/**
1105 * hif_pm_runtime_start(): start the runtime pm
1106 * @sc: pci context
1107 *
1108 * After this call, runtime pm will be active.
1109 */
1110static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1111{
Komal Seelam644263d2016-02-22 20:45:49 +05301112 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001113 uint32_t mode = hif_get_conparam(ol_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001114
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001115 if (!ol_sc->hif_config.enable_runtime_pm) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001116 HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1117 return;
1118 }
1119
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001120 if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001121 HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1122 __func__);
1123 return;
1124 }
1125
1126 setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1127 (unsigned long)sc);
1128
1129 HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1130 ol_sc->runtime_pm_delay);
1131
1132 cnss_runtime_init(sc->dev, ol_sc->runtime_pm_delay);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301133 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001134 hif_runtime_pm_debugfs_create(sc);
1135}
1136
1137/**
1138 * hif_pm_runtime_stop(): stop runtime pm
1139 * @sc: pci context
1140 *
1141 * Turns off runtime pm and frees corresponding resources
1142 * that were acquired by hif_runtime_pm_start().
1143 */
1144static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1145{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001146 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Komal Seelambd7c51d2016-02-24 10:27:30 +05301147 uint32_t mode = hif_get_conparam(ol_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001148
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001149 if (!ol_sc->hif_config.enable_runtime_pm)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001150 return;
1151
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001152 if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001153 return;
1154
1155 cnss_runtime_exit(sc->dev);
1156 cnss_pm_runtime_request(sc->dev, CNSS_PM_RUNTIME_RESUME);
1157
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301158 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001159
1160 hif_runtime_pm_debugfs_remove(sc);
1161 del_timer_sync(&sc->runtime_timer);
1162 /* doesn't wait for penting trafic unlike cld-2.0 */
1163}
1164
1165/**
1166 * hif_pm_runtime_open(): initialize runtime pm
1167 * @sc: pci data structure
1168 *
1169 * Early initialization
1170 */
1171static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1172{
1173 spin_lock_init(&sc->runtime_lock);
1174
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301175 qdf_atomic_init(&sc->pm_state);
Houston Hoffmancceec342015-11-11 11:37:20 -08001176 sc->prevent_linkdown_lock =
1177 hif_runtime_lock_init("linkdown suspend disabled");
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301178 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001179 INIT_LIST_HEAD(&sc->prevent_suspend_list);
1180}
1181
1182/**
Houston Hoffman20968292016-03-23 17:55:47 -07001183 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1184 * @sc: pci context
1185 *
1186 * Ensure we have only one vote against runtime suspend before closing
1187 * the runtime suspend feature.
1188 *
1189 * all gets by the wlan driver should have been returned
1190 * one vote should remain as part of cnss_runtime_exit
1191 *
1192 * needs to be revisited if we share the root complex.
1193 */
1194static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1195{
1196 if (atomic_read(&sc->dev->power.usage_count) != 1)
1197 hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1198
1199 /* ensure 1 and only 1 usage count so that when the wlan
1200 * driver is re-insmodded runtime pm won't be
1201 * disabled also ensures runtime pm doesn't get
1202 * broken on by being less than 1.
1203 */
1204 if (atomic_read(&sc->dev->power.usage_count) <= 0)
1205 atomic_set(&sc->dev->power.usage_count, 1);
1206 while (atomic_read(&sc->dev->power.usage_count) > 1)
1207 hif_pm_runtime_put_auto(sc->dev);
1208}
1209
1210/**
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001211 * hif_pm_runtime_close(): close runtime pm
1212 * @sc: pci bus handle
1213 *
1214 * ensure runtime_pm is stopped before closing the driver
1215 */
1216static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1217{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301218 if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001219 return;
1220 else
1221 hif_pm_runtime_stop(sc);
Houston Hoffman20968292016-03-23 17:55:47 -07001222
1223 hif_pm_runtime_sanitize_on_exit(sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001224}
1225
1226
#else

/* stubs: runtime pm support not compiled in (FEATURE_RUNTIME_PM off) */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif
1234
1235/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001236 * hif_disable_power_gating() - disable HW power gating
1237 * @hif_ctx: hif context
1238 *
1239 * disables pcie L1 power states
1240 */
1241static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1242{
1243 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1244 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1245
1246 if (NULL == scn) {
1247 HIF_ERROR("%s: Could not disable ASPM scn is null",
1248 __func__);
1249 return;
1250 }
1251
1252 /* Disable ASPM when pkt log is enabled */
1253 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1254 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1255}
1256
1257/**
1258 * hif_enable_power_gating() - enable HW power gating
1259 * @hif_ctx: hif context
1260 *
1261 * enables pcie L1 power states
1262 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001263static void hif_enable_power_gating(struct hif_pci_softc *sc)
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001264{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001265 if (NULL == sc) {
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001266 HIF_ERROR("%s: Could not disable ASPM scn is null",
1267 __func__);
1268 return;
1269 }
1270
1271 /* Re-enable ASPM after firmware/OTP download is complete */
1272 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1273}
1274
1275/**
1276 * hif_enable_power_management() - enable power management
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001277 * @hif_ctx: hif context
1278 *
1279 * Currently only does runtime pm. Eventually this function could
1280 * consolidate other power state features such as only letting
1281 * the soc sleep after the driver finishes loading and re-enabling
1282 * aspm (hif_enable_power_gating).
1283 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001284void hif_pci_enable_power_management(struct hif_softc *hif_sc,
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001285 bool is_packet_log_enabled)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001286{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001287 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001288
Komal Seelam02cf2f82016-02-22 20:44:25 +05301289 if (pci_ctx == NULL) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001290 HIF_ERROR("%s, hif_ctx null", __func__);
1291 return;
1292 }
1293
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001294 hif_pm_runtime_start(pci_ctx);
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001295
1296 if (!is_packet_log_enabled)
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001297 hif_enable_power_gating(pci_ctx);
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001298
1299 if (!CONFIG_ATH_PCIE_MAX_PERF &&
1300 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
Houston Hoffman4ca03b62016-03-14 21:11:51 -07001301 if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001302 HIF_ERROR("%s, failed to set target to sleep",
1303 __func__);
1304 }
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001305}
1306
Houston Hoffman53b34c42015-11-18 15:51:32 -08001307/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001308 * hif_disable_power_management() - disable power management
Houston Hoffman53b34c42015-11-18 15:51:32 -08001309 * @hif_ctx: hif context
1310 *
1311 * Currently disables runtime pm. Should be updated to behave
1312 * if runtime pm is not started. Should be updated to take care
1313 * of aspm and soc sleep for driver load.
1314 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001315void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
Houston Hoffman53b34c42015-11-18 15:51:32 -08001316{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301317 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman53b34c42015-11-18 15:51:32 -08001318
Komal Seelam02cf2f82016-02-22 20:44:25 +05301319 if (pci_ctx == NULL) {
Houston Hoffman53b34c42015-11-18 15:51:32 -08001320 HIF_ERROR("%s, hif_ctx null", __func__);
1321 return;
1322 }
1323
Houston Hoffman53b34c42015-11-18 15:51:32 -08001324 hif_pm_runtime_stop(pci_ctx);
1325}
1326
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001327#define ATH_PCI_PROBE_RETRY_MAX 3
1328/**
1329 * hif_bus_open(): hif_bus_open
1330 * @scn: scn
1331 * @bus_type: bus type
1332 *
1333 * Return: n/a
1334 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001335QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001336{
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001337 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001338
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001339 hif_ctx->bus_type = bus_type;
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001340 hif_pm_runtime_open(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001341
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301342 qdf_spinlock_create(&sc->irq_lock);
Houston Hoffman8a13e5c2015-10-29 16:12:09 -07001343
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001344 return hif_ce_open(hif_ctx);
Houston Hoffman108da402016-03-14 21:11:24 -07001345}
1346
#ifdef BMI_RSP_POLLING
/* BMI responses are polled; no recv completion callback is registered */
#define BMI_RSP_CB_REGISTER 0
#else
/* BMI responses arrive through CE recv completions */
#define BMI_RSP_CB_REGISTER 1
#endif
1352
1353/**
1354 * hif_register_bmi_callbacks() - register bmi callbacks
1355 * @hif_sc: hif context
1356 *
1357 * Bmi phase uses different copy complete callbacks than mission mode.
1358 */
1359static void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
1360{
1361 struct HIF_CE_pipe_info *pipe_info;
1362 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1363
1364 /*
1365 * Initially, establish CE completion handlers for use with BMI.
1366 * These are overwritten with generic handlers after we exit BMI phase.
1367 */
1368 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1369 ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1370
1371 if (BMI_RSP_CB_REGISTER) {
1372 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1373 ce_recv_cb_register(
1374 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1375 }
1376}
1377
1378/**
Houston Hoffman854e67f2016-03-14 21:11:39 -07001379 * hif_wake_target_cpu() - wake the target's cpu
1380 * @scn: hif context
1381 *
1382 * Send an interrupt to the device to wake up the Target CPU
1383 * so it has an opportunity to notice any changed state.
1384 */
1385void hif_wake_target_cpu(struct hif_softc *scn)
1386{
1387 QDF_STATUS rv;
1388 uint32_t core_ctrl;
1389 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1390
1391 rv = hif_diag_read_access(hif_hdl,
1392 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1393 &core_ctrl);
1394 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1395 /* A_INUM_FIRMWARE interrupt to Target CPU */
1396 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1397
1398 rv = hif_diag_write_access(hif_hdl,
1399 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1400 core_ctrl);
1401 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1402}
1403
Houston Hoffman63777f22016-03-14 21:11:49 -07001404/**
1405 * soc_wake_reset() - allow the target to go to sleep
1406 * @scn: hif_softc
1407 *
1408 * Clear the force wake register. This is done by
1409 * hif_sleep_entry and cancel defered timer sleep.
1410 */
1411static void soc_wake_reset(struct hif_softc *scn)
1412{
1413 hif_write32_mb(scn->mem +
1414 PCIE_LOCAL_BASE_ADDRESS +
1415 PCIE_SOC_WAKE_ADDRESS,
1416 PCIE_SOC_WAKE_RESET);
1417}
1418
1419/**
1420 * hif_sleep_entry() - gate target sleep
1421 * @arg: hif context
1422 *
1423 * This function is the callback for the sleep timer.
1424 * Check if last force awake critical section was at least
1425 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
1426 * allow the target to go to sleep and cancel the sleep timer.
1427 * otherwise reschedule the sleep timer.
1428 */
1429static void hif_sleep_entry(void *arg)
1430{
1431 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1432 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1433 uint32_t idle_ms;
1434
1435 if (scn->recovery)
1436 return;
1437
1438 if (hif_is_driver_unloading(scn))
1439 return;
1440
1441 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1442 if (hif_state->verified_awake == false) {
1443 idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1444 - hif_state->sleep_ticks);
1445 if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1446 if (!qdf_atomic_read(&scn->link_suspended)) {
1447 soc_wake_reset(scn);
1448 hif_state->fake_sleep = false;
1449 }
1450 } else {
1451 qdf_timer_stop(&hif_state->sleep_timer);
1452 qdf_timer_start(&hif_state->sleep_timer,
1453 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1454 }
1455 } else {
1456 qdf_timer_stop(&hif_state->sleep_timer);
1457 qdf_timer_start(&hif_state->sleep_timer,
1458 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1459 }
1460 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1461}
1462
Houston Hoffman854e67f2016-03-14 21:11:39 -07001463#define HIF_HIA_MAX_POLL_LOOP 1000000
1464#define HIF_HIA_POLLING_DELAY_MS 10
1465
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001466#ifdef CONFIG_WIN
1467void hif_set_hia_extnd(struct hif_softc *scn)
1468{
1469 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1470 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1471 uint32_t target_type = tgt_info->target_type;
1472
1473 HIF_TRACE("%s: E", __func__);
1474
1475 if ((target_type == TARGET_TYPE_AR900B) ||
1476 target_type == TARGET_TYPE_QCA9984 ||
1477 target_type == TARGET_TYPE_QCA9888) {
1478 /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1479 in RTC space */
1480 tgt_info->target_revision
1481 = CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem
1482 + CHIP_ID_ADDRESS));
1483 qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
1484 target_type, tgt_info->target_revision);
1485 }
1486
1487 {
1488 uint32_t flag2_value = 0;
1489 uint32_t flag2_targ_addr =
1490 host_interest_item_address(target_type,
1491 offsetof(struct host_interest_s, hi_skip_clock_init));
1492
1493 if ((ar900b_20_targ_clk != -1) &&
1494 (frac != -1) && (intval != -1)) {
1495 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1496 &flag2_value);
1497 qdf_print("\n Setting clk_override\n");
1498 flag2_value |= CLOCK_OVERRIDE;
1499
1500 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1501 flag2_value);
1502 qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
1503 } else {
1504 qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
1505 }
1506 }
1507
1508 if (target_type == TARGET_TYPE_AR900B
1509 || target_type == TARGET_TYPE_QCA9984
1510 || target_type == TARGET_TYPE_QCA9888) {
1511
1512 /* for AR9980_2.0, 300 mhz clock is used, right now we assume
1513 * this would be supplied through module parameters,
1514 * if not supplied assumed default or same behavior as 1.0.
1515 * Assume 1.0 clock can't be tuned, reset to defaults
1516 */
1517
1518 qdf_print(KERN_INFO"%s: setting the target pll frac %x intval %x\n",
1519 __func__, frac, intval);
1520
1521 /* do not touch frac, and int val, let them be default -1,
1522 * if desired, host can supply these through module params
1523 */
1524 if (frac != -1 || intval != -1) {
1525 uint32_t flag2_value = 0;
1526 uint32_t flag2_targ_addr;
1527
1528 flag2_targ_addr =
1529 host_interest_item_address(target_type,
1530 offsetof(struct host_interest_s,
1531 hi_clock_info));
1532 hif_diag_read_access(hif_hdl,
1533 flag2_targ_addr, &flag2_value);
1534 qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1535 flag2_value);
1536 hif_diag_write_access(hif_hdl, flag2_value, frac);
1537 qdf_print("\n INT Val %x Address %x\n",
1538 intval, flag2_value + 4);
1539 hif_diag_write_access(hif_hdl,
1540 flag2_value + 4, intval);
1541 } else {
1542 qdf_print(KERN_INFO"%s: no frac provided, skipping pre-configuring PLL\n",
1543 __func__);
1544 }
1545
1546 /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1547 if ((target_type == TARGET_TYPE_AR900B)
1548 && (tgt_info->target_revision == AR900B_REV_2)
1549 && ar900b_20_targ_clk != -1) {
1550 uint32_t flag2_value = 0;
1551 uint32_t flag2_targ_addr;
1552
1553 flag2_targ_addr
1554 = host_interest_item_address(target_type,
1555 offsetof(struct host_interest_s,
1556 hi_desired_cpu_speed_hz));
1557 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1558 &flag2_value);
1559 qdf_print("\n ====> hi_desired_cpu_speed_hz Address %x\n",
1560 flag2_value);
1561 hif_diag_write_access(hif_hdl, flag2_value,
1562 ar900b_20_targ_clk/*300000000u*/);
1563 } else if (target_type == TARGET_TYPE_QCA9888) {
1564 uint32_t flag2_targ_addr;
1565
1566 if (200000000u != qca9888_20_targ_clk) {
1567 qca9888_20_targ_clk = 300000000u;
1568 /* Setting the target clock speed to 300 mhz */
1569 }
1570
1571 flag2_targ_addr
1572 = host_interest_item_address(target_type,
1573 offsetof(struct host_interest_s,
1574 hi_desired_cpu_speed_hz));
1575 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1576 qca9888_20_targ_clk);
1577 } else {
1578 qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
1579 __func__);
1580 }
1581 } else {
1582 if (frac != -1 || intval != -1) {
1583 uint32_t flag2_value = 0;
1584 uint32_t flag2_targ_addr =
1585 host_interest_item_address(target_type,
1586 offsetof(struct host_interest_s,
1587 hi_clock_info));
1588 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1589 &flag2_value);
1590 qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1591 flag2_value);
1592 hif_diag_write_access(hif_hdl, flag2_value, frac);
1593 qdf_print("\n INT Val %x Address %x\n", intval,
1594 flag2_value + 4);
1595 hif_diag_write_access(hif_hdl, flag2_value + 4,
1596 intval);
1597 }
1598 }
1599}
1600
1601#else
1602
1603void hif_set_hia_extnd(struct hif_softc *scn)
1604{
1605}
1606
1607#endif
1608
Houston Hoffman854e67f2016-03-14 21:11:39 -07001609/**
1610 * hif_set_hia() - fill out the host interest area
1611 * @scn: hif context
1612 *
1613 * This is replaced by hif_wlan_enable for integrated targets.
1614 * This fills out the host interest area. The firmware will
1615 * process these memory addresses when it is first brought out
1616 * of reset.
1617 *
1618 * Return: 0 for success.
1619 */
1620int hif_set_hia(struct hif_softc *scn)
1621{
1622 QDF_STATUS rv;
1623 uint32_t interconnect_targ_addr = 0;
1624 uint32_t pcie_state_targ_addr = 0;
1625 uint32_t pipe_cfg_targ_addr = 0;
1626 uint32_t svc_to_pipe_map = 0;
1627 uint32_t pcie_config_flags = 0;
1628 uint32_t flag2_value = 0;
1629 uint32_t flag2_targ_addr = 0;
1630#ifdef QCA_WIFI_3_0
1631 uint32_t host_interest_area = 0;
1632 uint8_t i;
1633#else
1634 uint32_t ealloc_value = 0;
1635 uint32_t ealloc_targ_addr = 0;
1636 uint8_t banks_switched = 1;
1637 uint32_t chip_id;
1638#endif
1639 uint32_t pipe_cfg_addr;
1640 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1641 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1642 uint32_t target_type = tgt_info->target_type;
1643 int target_ce_config_sz, target_service_to_ce_map_sz;
1644 static struct CE_pipe_config *target_ce_config;
1645 struct service_to_pipe *target_service_to_ce_map;
1646
1647 HIF_TRACE("%s: E", __func__);
1648
1649 hif_get_target_ce_config(&target_ce_config, &target_ce_config_sz,
1650 &target_service_to_ce_map,
1651 &target_service_to_ce_map_sz,
1652 NULL, NULL);
1653
1654 if (ADRASTEA_BU)
1655 return QDF_STATUS_SUCCESS;
1656
1657#ifdef QCA_WIFI_3_0
1658 i = 0;
1659 while (i < HIF_HIA_MAX_POLL_LOOP) {
1660 host_interest_area = hif_read32_mb(scn->mem +
1661 A_SOC_CORE_SCRATCH_0_ADDRESS);
1662 if ((host_interest_area & 0x01) == 0) {
1663 qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1664 host_interest_area = 0;
1665 i++;
1666 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1667 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1668 } else {
1669 host_interest_area &= (~0x01);
1670 hif_write32_mb(scn->mem + 0x113014, 0);
1671 break;
1672 }
1673 }
1674
1675 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1676 HIF_ERROR("%s: hia polling timeout", __func__);
1677 return -EIO;
1678 }
1679
1680 if (host_interest_area == 0) {
1681 HIF_ERROR("%s: host_interest_area = 0", __func__);
1682 return -EIO;
1683 }
1684
1685 interconnect_targ_addr = host_interest_area +
1686 offsetof(struct host_interest_area_t,
1687 hi_interconnect_state);
1688
1689 flag2_targ_addr = host_interest_area +
1690 offsetof(struct host_interest_area_t, hi_option_flag2);
1691
1692#else
1693 interconnect_targ_addr = hif_hia_item_address(target_type,
1694 offsetof(struct host_interest_s, hi_interconnect_state));
1695 ealloc_targ_addr = hif_hia_item_address(target_type,
1696 offsetof(struct host_interest_s, hi_early_alloc));
1697 flag2_targ_addr = hif_hia_item_address(target_type,
1698 offsetof(struct host_interest_s, hi_option_flag2));
1699#endif
1700 /* Supply Target-side CE configuration */
1701 rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1702 &pcie_state_targ_addr);
1703 if (rv != QDF_STATUS_SUCCESS) {
1704 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1705 __func__, interconnect_targ_addr, rv);
1706 goto done;
1707 }
1708 if (pcie_state_targ_addr == 0) {
1709 rv = QDF_STATUS_E_FAILURE;
1710 HIF_ERROR("%s: pcie state addr is 0", __func__);
1711 goto done;
1712 }
1713 pipe_cfg_addr = pcie_state_targ_addr +
1714 offsetof(struct pcie_state_s,
1715 pipe_cfg_addr);
1716 rv = hif_diag_read_access(hif_hdl,
1717 pipe_cfg_addr,
1718 &pipe_cfg_targ_addr);
1719 if (rv != QDF_STATUS_SUCCESS) {
1720 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1721 __func__, pipe_cfg_addr, rv);
1722 goto done;
1723 }
1724 if (pipe_cfg_targ_addr == 0) {
1725 rv = QDF_STATUS_E_FAILURE;
1726 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1727 goto done;
1728 }
1729
1730 rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1731 (uint8_t *) target_ce_config,
1732 target_ce_config_sz);
1733
1734 if (rv != QDF_STATUS_SUCCESS) {
1735 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1736 goto done;
1737 }
1738
1739 rv = hif_diag_read_access(hif_hdl,
1740 pcie_state_targ_addr +
1741 offsetof(struct pcie_state_s,
1742 svc_to_pipe_map),
1743 &svc_to_pipe_map);
1744 if (rv != QDF_STATUS_SUCCESS) {
1745 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1746 goto done;
1747 }
1748 if (svc_to_pipe_map == 0) {
1749 rv = QDF_STATUS_E_FAILURE;
1750 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1751 goto done;
1752 }
1753
1754 rv = hif_diag_write_mem(hif_hdl,
1755 svc_to_pipe_map,
1756 (uint8_t *) target_service_to_ce_map,
1757 target_service_to_ce_map_sz);
1758 if (rv != QDF_STATUS_SUCCESS) {
1759 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1760 goto done;
1761 }
1762
1763 rv = hif_diag_read_access(hif_hdl,
1764 pcie_state_targ_addr +
1765 offsetof(struct pcie_state_s,
1766 config_flags),
1767 &pcie_config_flags);
1768 if (rv != QDF_STATUS_SUCCESS) {
1769 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1770 goto done;
1771 }
1772#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1773 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1774#else
1775 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1776#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1777 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1778#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1779 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1780#endif
1781 rv = hif_diag_write_mem(hif_hdl,
1782 pcie_state_targ_addr +
1783 offsetof(struct pcie_state_s,
1784 config_flags),
1785 (uint8_t *) &pcie_config_flags,
1786 sizeof(pcie_config_flags));
1787 if (rv != QDF_STATUS_SUCCESS) {
1788 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1789 goto done;
1790 }
1791
1792#ifndef QCA_WIFI_3_0
1793 /* configure early allocation */
1794 ealloc_targ_addr = hif_hia_item_address(target_type,
1795 offsetof(
1796 struct host_interest_s,
1797 hi_early_alloc));
1798
1799 rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1800 &ealloc_value);
1801 if (rv != QDF_STATUS_SUCCESS) {
1802 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1803 goto done;
1804 }
1805
1806 /* 1 bank is switched to IRAM, except ROME 1.0 */
1807 ealloc_value |=
1808 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1809 HI_EARLY_ALLOC_MAGIC_MASK);
1810
1811 rv = hif_diag_read_access(hif_hdl,
1812 CHIP_ID_ADDRESS |
1813 RTC_SOC_BASE_ADDRESS, &chip_id);
1814 if (rv != QDF_STATUS_SUCCESS) {
1815 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1816 goto done;
1817 }
1818 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1819 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1820 switch (CHIP_ID_REVISION_GET(chip_id)) {
1821 case 0x2: /* ROME 1.3 */
1822 /* 2 banks are switched to IRAM */
1823 banks_switched = 2;
1824 break;
1825 case 0x4: /* ROME 2.1 */
1826 case 0x5: /* ROME 2.2 */
1827 banks_switched = 6;
1828 break;
1829 case 0x8: /* ROME 3.0 */
1830 case 0x9: /* ROME 3.1 */
1831 case 0xA: /* ROME 3.2 */
1832 banks_switched = 9;
1833 break;
1834 case 0x0: /* ROME 1.0 */
1835 case 0x1: /* ROME 1.1 */
1836 default:
1837 /* 3 banks are switched to IRAM */
1838 banks_switched = 3;
1839 break;
1840 }
1841 }
1842
1843 ealloc_value |=
1844 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1845 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1846
1847 rv = hif_diag_write_access(hif_hdl,
1848 ealloc_targ_addr,
1849 ealloc_value);
1850 if (rv != QDF_STATUS_SUCCESS) {
1851 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1852 goto done;
1853 }
1854#endif
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001855 if ((target_type == TARGET_TYPE_AR900B)
1856 || (target_type == TARGET_TYPE_QCA9984)
1857 || (target_type == TARGET_TYPE_QCA9888)
1858 || (target_type == TARGET_TYPE_QCA9888)) {
1859 hif_set_hia_extnd(scn);
1860 }
Houston Hoffman854e67f2016-03-14 21:11:39 -07001861
1862 /* Tell Target to proceed with initialization */
1863 flag2_targ_addr = hif_hia_item_address(target_type,
1864 offsetof(
1865 struct host_interest_s,
1866 hi_option_flag2));
1867
1868 rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1869 &flag2_value);
1870 if (rv != QDF_STATUS_SUCCESS) {
1871 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1872 goto done;
1873 }
1874
1875 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1876 rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1877 flag2_value);
1878 if (rv != QDF_STATUS_SUCCESS) {
1879 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1880 goto done;
1881 }
1882
1883 hif_wake_target_cpu(scn);
1884
1885done:
1886
1887 return rv;
1888}
1889
1890/**
Houston Hoffman108da402016-03-14 21:11:24 -07001891 * hif_bus_configure() - configure the pcie bus
1892 * @hif_sc: pointer to the hif context.
1893 *
1894 * return: 0 for success. nonzero for failure.
1895 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07001896int hif_pci_bus_configure(struct hif_softc *hif_sc)
Houston Hoffman108da402016-03-14 21:11:24 -07001897{
1898 int status = 0;
Houston Hoffman63777f22016-03-14 21:11:49 -07001899 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001900 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
1901 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Houston Hoffman108da402016-03-14 21:11:24 -07001902
1903 hif_ce_prepare_config(hif_sc);
1904
Houston Hoffman63777f22016-03-14 21:11:49 -07001905 /* initialize sleep state adjust variables */
1906 hif_state->sleep_timer_init = true;
1907 hif_state->keep_awake_count = 0;
1908 hif_state->fake_sleep = false;
1909 hif_state->sleep_ticks = 0;
1910
1911 qdf_timer_init(NULL, &hif_state->sleep_timer,
1912 hif_sleep_entry, (void *)hif_state,
1913 QDF_TIMER_TYPE_WAKE_APPS);
1914 hif_state->sleep_timer_init = true;
1915
Houston Hoffman108da402016-03-14 21:11:24 -07001916 if (ADRASTEA_BU) {
1917 status = hif_wlan_enable(hif_sc);
1918
1919 if (status) {
1920 HIF_ERROR("%s: hif_wlan_enable error = %d",
1921 __func__, status);
Houston Hoffman63777f22016-03-14 21:11:49 -07001922 goto timer_free;
Houston Hoffman108da402016-03-14 21:11:24 -07001923 }
1924 }
1925
1926 A_TARGET_ACCESS_LIKELY(hif_sc);
Houston Hoffmanf7718622016-03-14 21:11:37 -07001927
1928 if (CONFIG_ATH_PCIE_MAX_PERF ||
1929 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
1930 /* Force AWAKE forever/till the driver is loaded */
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001931 if (tgt_info->target_type != TARGET_TYPE_IPQ4019) {
1932 if (hif_pci_target_sleep_state_adjust(hif_sc,
1933 false, true) < 0) {
1934 status = -EACCES;
1935 goto disable_wlan;
1936 }
Houston Hoffmanf7718622016-03-14 21:11:37 -07001937 }
1938 }
1939
Houston Hoffman108da402016-03-14 21:11:24 -07001940 status = hif_config_ce(hif_sc);
1941 if (status)
1942 goto disable_wlan;
1943
1944 status = hif_set_hia(hif_sc);
1945 if (status)
1946 goto unconfig_ce;
1947
1948 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1949
1950 hif_register_bmi_callbacks(hif_sc);
1951
1952 status = hif_configure_irq(hif_sc);
1953 if (status < 0)
1954 goto unconfig_ce;
1955
1956 A_TARGET_ACCESS_UNLIKELY(hif_sc);
1957
1958 return status;
1959
1960unconfig_ce:
1961 hif_unconfig_ce(hif_sc);
1962disable_wlan:
1963 A_TARGET_ACCESS_UNLIKELY(hif_sc);
1964 if (ADRASTEA_BU)
1965 hif_wlan_disable(hif_sc);
1966
Houston Hoffman63777f22016-03-14 21:11:49 -07001967timer_free:
1968 qdf_timer_stop(&hif_state->sleep_timer);
1969 qdf_timer_free(&hif_state->sleep_timer);
1970 hif_state->sleep_timer_init = false;
1971
Houston Hoffman108da402016-03-14 21:11:24 -07001972 HIF_ERROR("%s: failed, status = %d", __func__, status);
1973 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001974}
1975
1976/**
1977 * hif_bus_close(): hif_bus_close
1978 *
1979 * Return: n/a
1980 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001981void hif_pci_close(struct hif_softc *hif_sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001982{
Houston Hoffman108da402016-03-14 21:11:24 -07001983 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07001984 hif_pm_runtime_close(hif_pci_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07001985 hif_ce_close(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986}
1987
1988#define BAR_NUM 0
1989
1990int hif_enable_pci(struct hif_pci_softc *sc,
1991 struct pci_dev *pdev,
1992 const struct pci_device_id *id)
1993{
1994 void __iomem *mem;
1995 int ret = 0;
1996 uint16_t device_id;
Komal Seelam644263d2016-02-22 20:45:49 +05301997 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001998
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301999 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2000 if (device_id != id->device) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002001 HIF_ERROR(
2002 "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2003 __func__, device_id, id->device);
2004 /* pci link is down, so returing with error code */
2005 return -EIO;
2006 }
2007
2008 /* FIXME: temp. commenting out assign_resource
2009 * call for dev_attach to work on 2.6.38 kernel
2010 */
Amar Singhal901e33f2015-10-08 11:55:32 -07002011#if (!defined(__LINUX_ARM_ARCH__))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002012 if (pci_assign_resource(pdev, BAR_NUM)) {
2013 HIF_ERROR("%s: pci_assign_resource error", __func__);
2014 return -EIO;
2015 }
2016#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002017 if (pci_enable_device(pdev)) {
2018 HIF_ERROR("%s: pci_enable_device error",
2019 __func__);
2020 return -EIO;
2021 }
2022
2023 /* Request MMIO resources */
2024 ret = pci_request_region(pdev, BAR_NUM, "ath");
2025 if (ret) {
2026 HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2027 ret = -EIO;
2028 goto err_region;
2029 }
2030#ifdef CONFIG_ARM_LPAE
2031 /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
2032 * for 32 bits device also. */
2033 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2034 if (ret) {
2035 HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2036 goto err_dma;
2037 }
2038 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2039 if (ret) {
2040 HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2041 goto err_dma;
2042 }
2043#else
2044 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2045 if (ret) {
2046 HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2047 goto err_dma;
2048 }
2049 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2050 if (ret) {
2051 HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2052 __func__);
2053 goto err_dma;
2054 }
2055#endif
2056
2057 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2058
2059 /* Set bus master bit in PCI_COMMAND to enable DMA */
2060 pci_set_master(pdev);
2061
2062 /* Arrange for access to Target SoC registers. */
2063 mem = pci_iomap(pdev, BAR_NUM, 0);
2064 if (!mem) {
2065 HIF_ERROR("%s: PCI iomap error", __func__);
2066 ret = -EIO;
2067 goto err_iomap;
2068 }
2069 sc->mem = mem;
2070 sc->pdev = pdev;
2071 sc->dev = &pdev->dev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002072 sc->devid = id->device;
2073 sc->cacheline_sz = dma_get_cache_alignment();
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002074 ol_sc->mem = mem;
2075 sc->pci_enabled = true;
2076 return ret;
2077
2078err_iomap:
2079 pci_clear_master(pdev);
2080err_dma:
2081 pci_release_region(pdev, BAR_NUM);
2082err_region:
2083 pci_disable_device(pdev);
2084 return ret;
2085}
2086
/**
 * hif_disable_pci() - undo hif_enable_pci()
 * @sc: pci hif context
 *
 * Resets the device and releases the BAR mapping, bus mastering, MMIO
 * region and the pci device itself, in reverse order of acquisition.
 */
void hif_disable_pci(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	if (ol_sc == NULL) {
		HIF_ERROR("%s: ol_sc = NULL", __func__);
		return;
	}
	pci_set_drvdata(sc->pdev, NULL);
	/* put the target into reset before tearing down register access */
	hif_pci_device_reset(sc);
	pci_iounmap(sc->pdev, sc->mem);
	/* clear both copies of the BAR pointer (see hif_enable_pci) */
	sc->mem = NULL;
	ol_sc->mem = NULL;
	pci_clear_master(sc->pdev);
	pci_release_region(sc->pdev, BAR_NUM);
	pci_disable_device(sc->pdev);
}
2104
/**
 * hif_pci_probe_tgt_wakeup() - wake the target and sanity-check its state
 * @sc: pci hif context (sc->mem must already be mapped)
 *
 * Forces the SoC awake and polls (up to ~500ms) until it reports awake.
 * On pre-3.0 targets it then checks the firmware indicator; a target
 * that already claims FW_IND_INITIALIZED at probe time was left running
 * (e.g. aux-powered through a host restart) and needs a reset/retry.
 *
 * Return: 0 on success, -EAGAIN if the target timed out or is in an
 * unexpected state.
 */
int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
{
	int ret = 0;
	int targ_awake_limit = 500;
#ifndef QCA_WIFI_3_0
	uint32_t fw_indicator;
#endif
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/*
	 * Verify that the Target was started cleanly.*
	 * The case where this is most likely is with an AUX-powered
	 * Target and a Host in WoW mode. If the Host crashes,
	 * loses power, or is restarted (without unloading the driver)
	 * then the Target is left (aux) powered and running. On a
	 * subsequent driver load, the Target is in an unexpected state.
	 * We try to catch that here in order to reset the Target and
	 * retry the probe.
	 */
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	/* poll 1ms at a time for the SoC to come awake */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (0 == targ_awake_limit) {
			HIF_ERROR("%s: target awake timeout", __func__);
			ret = -EAGAIN;
			goto end;
		}
		qdf_mdelay(1);
		targ_awake_limit--;
	}

#if PCIE_BAR0_READY_CHECKING
	{
		int wait_limit = 200;
		/* Synchronization point: wait the BAR0 is configured */
		while (wait_limit-- &&
			   !(hif_read32_mb(sc->mem +
					   PCIE_LOCAL_BASE_ADDRESS +
					   PCIE_SOC_RDY_STATUS_ADDRESS) \
					   & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0
			 * configuration; fall through after the ~2s wait */
			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
				    __func__);
		}
	}
#endif

#ifndef QCA_WIFI_3_0
	/* sample the fw indicator, then let the SoC sleep again */
	fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if (fw_indicator & FW_IND_INITIALIZED) {
		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
			  __func__);
		ret = -EAGAIN;
		goto end;
	}
#endif

end:
	return ret;
}
2172
Houston Hoffman3db96a42016-05-05 19:54:39 -07002173void wlan_tasklet_msi(unsigned long data)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002174{
2175 struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2176 struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
Komal Seelam644263d2016-02-22 20:45:49 +05302177 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002178
Komal Seelam02cf2f82016-02-22 20:44:25 +05302179 if (scn->hif_init_done == false)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002180 goto irq_handled;
2181
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302182 if (qdf_atomic_read(&scn->link_suspended))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002183 goto irq_handled;
2184
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302185 qdf_atomic_inc(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002186
2187 if (entry->id == HIF_MAX_TASKLET_NUM) {
2188 /* the last tasklet is for fw IRQ */
Komal Seelam02cf2f82016-02-22 20:44:25 +05302189 (irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
Komal Seelam6ee55902016-04-11 17:11:07 +05302190 if (scn->target_status == TARGET_STATUS_RESET)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002191 goto irq_handled;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302192 } else if (entry->id < scn->ce_count) {
2193 ce_per_engine_service(scn, entry->id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002194 } else {
2195 HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2196 __func__, entry->id);
2197 }
2198 return;
2199
2200irq_handled:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302201 qdf_atomic_dec(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002202
2203}
2204
/**
 * hif_configure_msi() - try to configure MSI interrupts
 * @sc: pci hif context
 *
 * Attempts multi-MSI first (one vector for the fw plus one per copy
 * engine), then a single shared MSI. The caller falls back to legacy
 * line interrupts when this returns nonzero.
 *
 * NOTE(review): after the multi/single/none if-else chain the function
 * unconditionally calls pci_enable_msi() again and re-requests the
 * single-MSI irq — this looks like a duplicated fallback block that
 * also runs after a successful multi-MSI setup; verify before reuse.
 * Also note the loop below initializes entries 0..ce_count inclusive
 * ('<='), and every tasklet_init() call re-initializes the same
 * sc->intr_tq object — confirm both against the struct definitions.
 *
 * Return: 0 when an MSI mode was configured, nonzero otherwise.
 */
int hif_configure_msi(struct hif_pci_softc *sc)
{
	int ret = 0;
	int num_msi_desired;
	int rv = -1;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	HIF_TRACE("%s: E", __func__);

	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
	if (num_msi_desired < 1) {
		HIF_ERROR("%s: MSI is not configured", __func__);
		return -EINVAL;
	}

	if (num_msi_desired > 1) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
						num_msi_desired);
#else
		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
#endif
	}
	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
		  __func__, num_msi_desired, rv);

	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
		int i;

		/* multi-MSI: last tasklet entry is reserved for the fw irq */
		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
			(void *)sc;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
			HIF_MAX_TASKLET_NUM;
		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
			 (unsigned long)&sc->tasklet_entries[
			 HIF_MAX_TASKLET_NUM-1]);
		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
				  hif_pci_msi_fw_handler,
				  IRQF_SHARED, "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
		for (i = 0; i <= scn->ce_count; i++) {
			sc->tasklet_entries[i].hif_handler = (void *)sc;
			sc->tasklet_entries[i].id = i;
			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
				 (unsigned long)&sc->tasklet_entries[i]);
			ret = request_irq((sc->pdev->irq +
					   i + MSI_ASSIGN_CE_INITIAL),
					  ce_per_engine_handler, IRQF_SHARED,
					  "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else if (rv > 0) {
		HIF_TRACE("%s: use single msi", __func__);

		ret = pci_enable_msi(sc->pdev);
		if (ret < 0) {
			HIF_ERROR("%s: single MSI allocation failed",
				  __func__);
			/* Try for legacy PCI line interrupts */
			sc->num_msi_intrs = 0;
		} else {
			sc->num_msi_intrs = 1;
			tasklet_init(&sc->intr_tq,
				wlan_tasklet, (unsigned long)sc);
			ret = request_irq(sc->pdev->irq,
					  hif_pci_interrupt_handler,
					  IRQF_SHARED, "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else {
		sc->num_msi_intrs = 0;
		ret = -EIO;
		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
	}
	/* see NOTE(review) in the header: duplicated single-MSI fallback */
	ret = pci_enable_msi(sc->pdev);
	if (ret < 0) {
		HIF_ERROR("%s: single MSI interrupt allocation failed",
			  __func__);
		/* Try for legacy PCI line interrupts */
		sc->num_msi_intrs = 0;
	} else {
		sc->num_msi_intrs = 1;
		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
		ret = request_irq(sc->pdev->irq,
				  hif_pci_interrupt_handler, IRQF_SHARED,
				  "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
	}

	if (ret == 0) {
		/* unmask the interrupt group and let the SoC sleep again */
		hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
			  PCIE_INTR_ENABLE_ADDRESS),
			  HOST_GROUP0_MASK);
		hif_write32_mb(sc->mem +
			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
			  PCIE_SOC_WAKE_RESET);
	}
	HIF_TRACE("%s: X, ret = %d", __func__, ret);

	return ret;

err_intr:
/* NOTE(review): indentation below is misleading — the return statement
 * is NOT part of the if; only pci_disable_msi() is conditional */
if (sc->num_msi_intrs >= 1)
	pci_disable_msi(sc->pdev);
	return ret;
}
2324
/**
 * hif_pci_configure_legacy_irq() - fall back to legacy line interrupts
 * @sc: pci hif context
 *
 * Used when MSI is not supported or MSI setup failed. Requests the
 * shared legacy irq, enables the host interrupt group and lets the SoC
 * sleep again.
 *
 * Return: 0 on success, request_irq() error otherwise.
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	HIF_TRACE("%s: E", __func__);

	/* does not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	/* Use sc->irq instead of sc->pdev-irq
	platform_device pdev doesn't have an irq field */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
		  PCIE_INTR_ENABLE_ADDRESS),
		  HOST_GROUP0_MASK);
	/* read back, presumably to flush the posted enable write — confirm */
	hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
			  "%s: X, ret = %d", __func__, ret);
	return ret;
}
2357
2358/**
2359 * hif_nointrs(): disable IRQ
2360 *
2361 * This function stops interrupt(s)
2362 *
Komal Seelam644263d2016-02-22 20:45:49 +05302363 * @scn: struct hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002364 *
2365 * Return: none
2366 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002367void hif_pci_nointrs(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002368{
2369 int i;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302370 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2371 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002372
2373 if (scn->request_irq_done == false)
2374 return;
2375 if (sc->num_msi_intrs > 0) {
2376 /* MSI interrupt(s) */
2377 for (i = 0; i < sc->num_msi_intrs; i++) {
Houston Hoffman3db96a42016-05-05 19:54:39 -07002378 free_irq(sc->irq + i, sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002379 }
2380 sc->num_msi_intrs = 0;
2381 } else {
Houston Hoffman3db96a42016-05-05 19:54:39 -07002382 /* Legacy PCI line interrupt
2383 Use sc->irq instead of sc->pdev-irq
2384 platform_device pdev doesn't have an irq field */
2385 free_irq(sc->irq, sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002386 }
Komal Seelam02cf2f82016-02-22 20:44:25 +05302387 ce_unregister_irq(hif_state, 0xfff);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002388 scn->request_irq_done = false;
2389}
2390
2391/**
2392 * hif_disable_bus(): hif_disable_bus
2393 *
2394 * This function disables the bus
2395 *
2396 * @bdev: bus dev
2397 *
2398 * Return: none
2399 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002400void hif_pci_disable_bus(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002401{
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302402 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2403 struct pci_dev *pdev = sc->pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002404 void __iomem *mem;
2405
2406 /* Attach did not succeed, all resources have been
2407 * freed in error handler
2408 */
2409 if (!sc)
2410 return;
2411
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002412 if (ADRASTEA_BU) {
2413 hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2414 hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
2415 HOST_GROUP0_MASK);
2416 }
2417
Houston Hoffmanf241eb02016-05-10 17:07:36 -07002418 hif_pci_device_reset(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002419 mem = (void __iomem *)sc->mem;
2420 if (mem) {
2421 pci_disable_msi(pdev);
2422 hif_dump_pipe_debug_count(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002423 if (scn->athdiag_procfs_inited) {
2424 athdiag_procfs_remove();
2425 scn->athdiag_procfs_inited = false;
2426 }
2427 pci_set_drvdata(pdev, NULL);
2428 pci_iounmap(pdev, mem);
2429 scn->mem = NULL;
2430 pci_release_region(pdev, BAR_NUM);
2431 pci_clear_master(pdev);
2432 pci_disable_device(pdev);
2433 }
2434 HIF_INFO("%s: X", __func__);
2435}
2436
2437#define OL_ATH_PCI_PM_CONTROL 0x44
2438
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002439#ifdef FEATURE_RUNTIME_PM
Houston Hoffmancceec342015-11-11 11:37:20 -08002440/**
2441 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occuring
2442 * @scn: hif context
2443 * @flag: prevent linkdown if true otherwise allow
2444 *
2445 * this api should only be called as part of bus prevent linkdown
2446 */
Komal Seelam644263d2016-02-22 20:45:49 +05302447static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
Houston Hoffmancceec342015-11-11 11:37:20 -08002448{
Komal Seelam644263d2016-02-22 20:45:49 +05302449 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Komal Seelam5584a7c2016-02-24 19:22:48 +05302450 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Houston Hoffmancceec342015-11-11 11:37:20 -08002451
2452 if (flag)
Komal Seelam644263d2016-02-22 20:45:49 +05302453 hif_pm_runtime_prevent_suspend(hif_hdl,
2454 sc->prevent_linkdown_lock);
Houston Hoffmancceec342015-11-11 11:37:20 -08002455 else
Komal Seelam644263d2016-02-22 20:45:49 +05302456 hif_pm_runtime_allow_suspend(hif_hdl,
2457 sc->prevent_linkdown_lock);
Houston Hoffmancceec342015-11-11 11:37:20 -08002458}
2459#else
Komal Seelam644263d2016-02-22 20:45:49 +05302460static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
Houston Hoffmancceec342015-11-11 11:37:20 -08002461{
2462}
2463#endif
2464
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002465#if defined(CONFIG_CNSS) && defined(CONFIG_PCI_MSM)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002466/**
2467 * hif_bus_prevent_linkdown(): allow or permit linkdown
2468 * @flag: true prevents linkdown, false allows
2469 *
2470 * Calls into the platform driver to vote against taking down the
2471 * pcie link.
2472 *
2473 * Return: n/a
2474 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002475void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002476{
2477 HIF_ERROR("wlan: %s pcie power collapse",
2478 (flag ? "disable" : "enable"));
Houston Hoffmancceec342015-11-11 11:37:20 -08002479 hif_runtime_prevent_linkdown(scn, flag);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002480 cnss_wlan_pm_control(flag);
2481}
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002482#else
Houston Hoffman4849fcc2016-05-05 15:42:35 -07002483void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002484{
2485 HIF_ERROR("wlan: %s pcie power collapse",
2486 (flag ? "disable" : "enable"));
2487 hif_runtime_prevent_linkdown(scn, flag);
2488}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002489#endif
2490
2491/**
2492 * hif_drain_tasklets(): wait untill no tasklet is pending
2493 * @scn: hif context
2494 *
2495 * Let running tasklets clear pending trafic.
2496 *
2497 * Return: 0 if no bottom half is in progress when it returns.
2498 * -EFAULT if it times out.
2499 */
Komal Seelam644263d2016-02-22 20:45:49 +05302500static inline int hif_drain_tasklets(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002501{
2502 uint32_t ce_drain_wait_cnt = 0;
2503
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302504 while (qdf_atomic_read(&scn->active_tasklet_cnt)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002505 if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
2506 HIF_ERROR("%s: CE still not done with access",
2507 __func__);
2508
2509 return -EFAULT;
2510 }
2511 HIF_INFO("%s: Waiting for CE to finish access", __func__);
2512 msleep(10);
2513 }
2514 return 0;
2515}
2516
2517/**
2518 * hif_bus_suspend_link_up() - suspend the bus
2519 *
2520 * Configures the pci irq line as a wakeup source.
2521 *
2522 * Return: 0 for success and non-zero for failure
2523 */
Komal Seelam644263d2016-02-22 20:45:49 +05302524static int hif_bus_suspend_link_up(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002525{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002526 struct pci_dev *pdev;
2527 int status;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302528 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002529
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302530 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002531 return -EFAULT;
2532
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302533 pdev = sc->pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002534
2535 status = hif_drain_tasklets(scn);
2536 if (status != 0)
2537 return status;
2538
2539 if (unlikely(enable_irq_wake(pdev->irq))) {
2540 HIF_ERROR("%s: Fail to enable wake IRQ!", __func__);
2541 return -EINVAL;
2542 }
2543
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302544 hif_cancel_deferred_target_sleep(scn);
Houston Hoffmane61d4e12016-03-14 21:11:48 -07002545
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002546 return 0;
2547}
2548
2549/**
2550 * hif_bus_resume_link_up() - hif bus resume API
2551 *
2552 * This function disables the wakeup source.
2553 *
2554 * Return: 0 for success and non-zero for failure
2555 */
Komal Seelam644263d2016-02-22 20:45:49 +05302556static int hif_bus_resume_link_up(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002557{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002558 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302559 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002560
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302561 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002562 return -EFAULT;
2563
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302564 pdev = sc->pdev;
2565
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002566 if (!pdev) {
2567 HIF_ERROR("%s: pci_dev is null", __func__);
2568 return -EFAULT;
2569 }
2570
2571 if (unlikely(disable_irq_wake(pdev->irq))) {
2572 HIF_ERROR("%s: Fail to disable wake IRQ!", __func__);
2573 return -EFAULT;
2574 }
2575
2576 return 0;
2577}
2578
2579/**
2580 * hif_bus_suspend_link_down() - suspend the bus
2581 *
2582 * Suspends the hif layer taking care of draining recieve queues and
2583 * shutting down copy engines if needed. Ensures opy engine interrupts
2584 * are disabled when it returns. Prevents register access after it
2585 * returns.
2586 *
2587 * Return: 0 for success and non-zero for failure
2588 */
Komal Seelam644263d2016-02-22 20:45:49 +05302589static int hif_bus_suspend_link_down(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002590{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002591 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302592 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002593 int status = 0;
2594
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302595 pdev = sc->pdev;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302596
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002597 disable_irq(pdev->irq);
2598
2599 status = hif_drain_tasklets(scn);
2600 if (status != 0) {
2601 enable_irq(pdev->irq);
2602 return status;
2603 }
2604
2605 /* Stop the HIF Sleep Timer */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302606 hif_cancel_deferred_target_sleep(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002607
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302608 qdf_atomic_set(&scn->link_suspended, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002609
2610 return 0;
2611}
2612
2613/**
2614 * hif_bus_resume_link_down() - hif bus resume API
2615 *
2616 * This function resumes the bus reenabling interupts.
2617 *
2618 * Return: 0 for success and non-zero for failure
2619 */
Komal Seelam644263d2016-02-22 20:45:49 +05302620static int hif_bus_resume_link_down(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002621{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002622 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302623 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002624
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302625 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002626 return -EFAULT;
2627
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302628 pdev = sc->pdev;
2629
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002630 if (!pdev) {
2631 HIF_ERROR("%s: pci_dev is null", __func__);
2632 return -EFAULT;
2633 }
2634
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302635 qdf_atomic_set(&scn->link_suspended, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002636
2637 enable_irq(pdev->irq);
2638
2639 return 0;
2640}
2641
2642/**
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002643 * hif_pci_suspend(): prepare hif for suspend
Houston Hoffman1688fba2015-11-10 16:47:27 -08002644 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002645 * chose suspend type based on link suspend voting.
2646 *
Houston Hoffman1688fba2015-11-10 16:47:27 -08002647 * Return: 0 for success and non-zero error code for failure
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002648 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002649int hif_pci_bus_suspend(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002650{
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002651 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
Komal Seelamf8600682016-02-02 18:17:13 +05302652 return hif_bus_suspend_link_down(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002653 else
Komal Seelamf8600682016-02-02 18:17:13 +05302654 return hif_bus_suspend_link_up(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002655}
2656
2657/**
Houston Hoffman1688fba2015-11-10 16:47:27 -08002658 * hif_bus_resume(): prepare hif for resume
2659 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002660 * chose suspend type based on link suspend voting.
2661 *
Houston Hoffman1688fba2015-11-10 16:47:27 -08002662 * Return: 0 for success and non-zero error code for failure
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002663 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002664int hif_pci_bus_resume(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002665{
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002666 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
Komal Seelamf8600682016-02-02 18:17:13 +05302667 return hif_bus_resume_link_down(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002668 else
Komal Seelamf8600682016-02-02 18:17:13 +05302669 return hif_bus_resume_link_up(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002670}
2671
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002672#ifdef FEATURE_RUNTIME_PM
2673/**
2674 * __hif_runtime_pm_set_state(): utility function
2675 * @state: state to set
2676 *
2677 * indexes into the runtime pm state and sets it.
2678 */
Komal Seelam644263d2016-02-22 20:45:49 +05302679static void __hif_runtime_pm_set_state(struct hif_softc *scn,
Komal Seelamf8600682016-02-02 18:17:13 +05302680 enum hif_pm_runtime_state state)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002681{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002682 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002683
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002684 if (NULL == sc) {
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002685 HIF_ERROR("%s: HIF_CTX not initialized",
2686 __func__);
2687 return;
2688 }
2689
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302690 qdf_atomic_set(&sc->pm_state, state);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002691}
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002692
/**
 * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm operation has started
 */
static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
}
2702
/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
2712
/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
2722
Houston Hoffman692cc052015-11-10 18:42:47 -08002723/**
2724 * hif_log_runtime_suspend_success() - log a successful runtime suspend
2725 */
Komal Seelam644263d2016-02-22 20:45:49 +05302726static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002727{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002728 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002729 if (sc == NULL)
2730 return;
2731
2732 sc->pm_stats.suspended++;
2733 sc->pm_stats.suspend_jiffies = jiffies;
2734}
2735
2736/**
2737 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2738 *
2739 * log a failed runtime suspend
2740 * mark last busy to prevent immediate runtime suspend
2741 */
Komal Seelamf8600682016-02-02 18:17:13 +05302742static void hif_log_runtime_suspend_failure(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002743{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002744 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002745 if (sc == NULL)
2746 return;
2747
2748 sc->pm_stats.suspend_err++;
Houston Hoffman692cc052015-11-10 18:42:47 -08002749}
2750
2751/**
2752 * hif_log_runtime_resume_success() - log a successful runtime resume
2753 *
2754 * log a successfull runtime resume
2755 * mark last busy to prevent immediate runtime suspend
2756 */
Komal Seelamf8600682016-02-02 18:17:13 +05302757static void hif_log_runtime_resume_success(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002758{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002759 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002760 if (sc == NULL)
2761 return;
2762
2763 sc->pm_stats.resumed++;
Houston Hoffman78467a82016-01-05 20:08:56 -08002764}
2765
2766/**
2767 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2768 *
2769 * Record the failure.
2770 * mark last busy to delay a retry.
2771 * adjust the runtime_pm state.
2772 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302773void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002774{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002775 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2776 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002777
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002778 hif_log_runtime_suspend_failure(hif_ctx);
2779 if (hif_pci_sc != NULL)
2780 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2781 hif_runtime_pm_set_state_on(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002782}
2783
2784/**
2785 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2786 *
2787 * Makes sure that the pci link will be taken down by the suspend opperation.
2788 * If the hif layer is configured to leave the bus on, runtime suspend will
2789 * not save any power.
2790 *
2791 * Set the runtime suspend state to in progress.
2792 *
2793 * return -EINVAL if the bus won't go down. otherwise return 0
2794 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302795int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002796{
Komal Seelam644263d2016-02-22 20:45:49 +05302797 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2798
Komal Seelamf8600682016-02-02 18:17:13 +05302799 if (!hif_can_suspend_link(hif_ctx)) {
Houston Hoffman78467a82016-01-05 20:08:56 -08002800 HIF_ERROR("Runtime PM not supported for link up suspend");
2801 return -EINVAL;
2802 }
2803
Komal Seelam644263d2016-02-22 20:45:49 +05302804 hif_runtime_pm_set_state_inprogress(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002805 return 0;
2806}
2807
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: opaque hif context
 *
 * Moves the runtime_pm state to SUSPENDED, then records the success.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(hif_sc);
	hif_log_runtime_suspend_success(hif_sc);
}
2821
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: opaque hif context
 *
 * Updates the runtime pm state to in-progress.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	hif_runtime_pm_set_state_inprogress(HIF_GET_SOFTC(hif_ctx));
}
2833
2834/**
2835 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2836 *
2837 * record the success.
2838 * adjust the runtime_pm state
2839 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302840void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002841{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002842 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2843 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002844
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002845 hif_log_runtime_resume_success(hif_ctx);
2846 if (hif_pci_sc != NULL)
2847 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2848 hif_runtime_pm_set_state_on(scn);
Houston Hoffman692cc052015-11-10 18:42:47 -08002849}
2850#endif
2851
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: opaque hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	return hif_pci_bus_suspend(HIF_GET_SOFTC(hif_ctx));
}
2861
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: opaque hif context
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * NOTE(review): the write-index sync below runs only when
 * Q_TARGET_ACCESS_BEGIN() returns non-zero. If that macro follows the
 * 0-on-success convention of hif_pci_target_sleep_state_adjust() in this
 * file, this condition looks inverted — confirm against the macro
 * definition before changing it.
 */
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn)) {
			ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
			qdf_spin_lock_bh(&ce_state->ce_index_lock);

			/*war_ce_src_ring_write_idx_set */
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					ce_state->src_ring->write_index);
			qdf_spin_unlock_bh(&ce_state->ce_index_lock);
			Q_TARGET_ACCESS_END(scn);
		}
	}
}
#else
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
#endif
2894
2895
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: opaque hif context
 *
 * Resumes the bus first, then re-syncs the fastpath write index.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int status = hif_pci_bus_resume(HIF_GET_SOFTC(hif_ctx));

	hif_fastpath_resume(hif_ctx);

	return status;
}
2909
#if CONFIG_PCIE_64BIT_MSI
/**
 * hif_free_msi_ctx() - free the MSI "magic" DMA bookkeeping
 * @scn: hif context
 *
 * Releases the 4-byte coherent buffer used for 64-bit MSI and clears
 * the cached pointer and DMA address.
 *
 * NOTE(review): this reads scn->hif_sc directly while the rest of the
 * file uses HIF_GET_PCI_SOFTC(scn) — confirm both resolve to the same
 * object.
 */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
/* 64-bit MSI support not compiled in: nothing to free */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif
2927
/**
 * hif_pci_disable_isr() - tear down interrupt delivery for this device
 * @scn: hif context
 *
 * Disables/frees the bus interrupts and MSI bookkeeping first, then
 * kills the CE tasklets and the PCI interrupt tasklet, and finally
 * clears the active-tasklet count.
 */
void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
2939
/**
 * hif_pci_reset_soc() - reset the SoC
 * @hif_sc: hif context
 *
 * Issues a warm reset on AR9888 v2 when the CPU warm reset workaround
 * is compiled in; otherwise falls back to a full device reset.
 */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* target info is only needed by the warm-reset path; declaring it
	 * here avoids unused-variable warnings when the WAR is disabled.
	 */
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}
2960
#ifdef CONFIG_PCI_MSM
/* dump MSM PCIe root-complex debug state for this device */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
{
	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
}
#else
/* no-op stub when the MSM PCIe driver is not available
 * (stray semicolon after the body removed: not valid in strict ISO C)
 */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
#endif
2970
Komal Seelambd7c51d2016-02-24 10:27:30 +05302971/**
2972 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2973 * @sc: HIF PCIe Context
2974 *
2975 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2976 *
2977 * Return: Failure to caller
2978 */
2979static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2980{
2981 uint16_t val;
2982 uint32_t bar;
2983 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2984 struct hif_softc *scn = HIF_GET_SOFTC(sc);
2985 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2986 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
Komal Seelam75080122016-03-02 15:18:25 +05302987 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
Komal Seelambd7c51d2016-02-24 10:27:30 +05302988 A_target_id_t pci_addr = scn->mem;
2989
2990 HIF_ERROR("%s: keep_awake_count = %d",
2991 __func__, hif_state->keep_awake_count);
2992
2993 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2994
2995 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
2996
2997 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2998
2999 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3000
3001 pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3002
3003 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3004
3005 pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3006
3007 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3008
3009 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3010
3011 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3012
3013 HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
3014 hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3015 PCIE_SOC_WAKE_ADDRESS));
3016
3017 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3018 hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3019 RTC_STATE_ADDRESS));
3020
3021 HIF_ERROR("%s:error, wakeup target", __func__);
3022 hif_msm_pcie_debug_info(sc);
3023
3024 if (!cfg->enable_self_recovery)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303025 QDF_BUG(0);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303026
3027 scn->recovery = true;
3028
3029 if (cbk->set_recovery_in_progress)
3030 cbk->set_recovery_in_progress(cbk->context, true);
3031
3032 cnss_wlan_pci_link_down();
3033 return -EACCES;
3034}
3035
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003036/*
3037 * For now, we use simple on-demand sleep/wake.
3038 * Some possible improvements:
3039 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3040 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3041 * Careful, though, these functions may be used by
3042 * interrupt handlers ("atomic")
3043 * -Don't use host_reg_table for this code; instead use values directly
3044 * -Use a separate timer to track activity and allow Target to sleep only
3045 * if it hasn't done anything for a while; may even want to delay some
3046 * processing for a short while in order to "batch" (e.g.) transmit
3047 * requests with completion processing into "windows of up time". Costs
3048 * some performance, but improves power utilization.
3049 * -On some platforms, it might be possible to eliminate explicit
3050 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
3051 * recover from the failure by forcing the Target awake.
3052 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
3053 * overhead in some cases. Perhaps this makes more sense when
3054 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3055 * disabled.
3056 * -It is possible to compile this code out and simply force the Target
3057 * to remain awake. That would yield optimal performance at the cost of
3058 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3059 *
3060 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3061 */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: true to drop a keep-awake reference (allow sleep),
 *            false to take one (force awake)
 * @wait_for_it: when waking, poll until the target is verifiably awake
 *               (meaningful only when @sleep_ok is false)
 *
 * Reference-counted keep-awake management for the target. Dropping the
 * last reference arms the fake-sleep timer; taking the first reference
 * writes PCIE_SOC_WAKE_V_MASK to force the target awake.
 *
 * Return: 0 on success, -EACCES when recovery is active, the link is
 * suspended, or the wakeup poll times out.
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
			      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;	/* high-water mark of observed wake latency */
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;	/* set after an invalid access to force extra checks */

	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
				__func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			/* timer armed but target never actually slept */
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(pci_addr +
					      PCIE_LOCAL_BASE_ADDRESS +
					      PCIE_SOC_WAKE_ADDRESS,
					      PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			/* poll with a back-off from 5us up to 50us per step */
			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				} else
				if (!hif_pci_targ_is_present
					    (scn, pci_addr)) {
					break;
				}

				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			__func__,
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CLR_ADDRESS),
			hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003186
3187#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303188uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003189{
3190 uint32_t value;
3191 void *addr;
3192
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003193 addr = scn->mem + offset;
Houston Hoffman56e0d702016-05-05 17:48:06 -07003194 value = hif_read32_mb(addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003195
3196 {
3197 unsigned long irq_flags;
3198 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3199
3200 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3201 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3202 pcie_access_log[idx].is_write = false;
3203 pcie_access_log[idx].addr = addr;
3204 pcie_access_log[idx].value = value;
3205 pcie_access_log_seqnum++;
3206 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3207 }
3208
3209 return value;
3210}
3211
3212void
Komal Seelam644263d2016-02-22 20:45:49 +05303213hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003214{
3215 void *addr;
3216
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003217 addr = scn->mem + (offset);
3218 hif_write32_mb(addr, value);
3219
3220 {
3221 unsigned long irq_flags;
3222 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3223
3224 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3225 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3226 pcie_access_log[idx].is_write = true;
3227 pcie_access_log[idx].addr = addr;
3228 pcie_access_log[idx].value = value;
3229 pcie_access_log_seqnum++;
3230 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3231 }
3232}
3233
3234/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003235 * hif_target_dump_access_log() - dump access log
3236 *
3237 * dump access log
3238 *
3239 * Return: n/a
3240 */
3241void hif_target_dump_access_log(void)
3242{
3243 int idx, len, start_idx, cur_idx;
3244 unsigned long irq_flags;
3245
3246 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3247 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3248 len = PCIE_ACCESS_LOG_NUM;
3249 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3250 } else {
3251 len = pcie_access_log_seqnum;
3252 start_idx = 0;
3253 }
3254
3255 for (idx = 0; idx < len; idx++) {
3256 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3257 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%p val:%u.",
3258 __func__, idx,
3259 pcie_access_log[cur_idx].seqnum,
3260 pcie_access_log[cur_idx].is_write,
3261 pcie_access_log[cur_idx].addr,
3262 pcie_access_log[cur_idx].value);
3263 }
3264
3265 pcie_access_log_seqnum = 0;
3266 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3267}
3268#endif
3269
#ifndef HIF_AHB
/* Stub used when AHB support is not compiled in: reaching it means an
 * AHB target type was selected without AHB support, so assert and fail.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif
3277
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003278/**
Houston Hoffman3db96a42016-05-05 19:54:39 -07003279 * hif_configure_irq() - configure interrupt
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003280 *
3281 * This function configures interrupt(s)
3282 *
3283 * @sc: PCIe control struct
3284 * @hif_hdl: struct HIF_CE_state
3285 *
3286 * Return: 0 - for success
3287 */
Komal Seelam644263d2016-02-22 20:45:49 +05303288int hif_configure_irq(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003289{
3290 int ret = 0;
Komal Seelam644263d2016-02-22 20:45:49 +05303291 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003292
3293 HIF_TRACE("%s: E", __func__);
3294
Komal Seelamaa72bb72016-02-01 17:22:50 +05303295 hif_init_reschedule_tasklet_work(sc);
3296
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003297 if (ENABLE_MSI) {
3298 ret = hif_configure_msi(sc);
3299 if (ret == 0)
3300 goto end;
3301 }
3302 /* MSI failed. Try legacy irq */
Houston Hoffman3db96a42016-05-05 19:54:39 -07003303 switch (scn->target_info.target_type) {
3304 case TARGET_TYPE_IPQ4019:
3305 ret = hif_ahb_configure_legacy_irq(sc);
3306 break;
3307 default:
3308 ret = hif_pci_configure_legacy_irq(sc);
3309 break;
3310 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003311 if (ret < 0) {
3312 HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3313 __func__, ret);
3314 return ret;
3315 }
3316end:
3317 scn->request_irq_done = true;
3318 return 0;
3319}
3320
/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
void hif_target_sync(struct hif_softc *scn)
{
	/* tell the target we will use legacy (firmware) interrupts */
	hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS,
			PCIE_SOC_WAKE_V_MASK);
	/* busy-wait with no timeout until the target reports awake */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		/* poll the FW indicator for up to ~500 * 10ms */
		int wait_limit = 500;
		int fw_ind = 0;
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		while (1) {
			fw_ind = hif_read32_mb(scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			/* re-assert the enable in case fw missed it */
			hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

			qdf_mdelay(10);
		}
		if (wait_limit < 0)
			HIF_TRACE("%s: FW signal timed out",
					__func__);
		else
			HIF_TRACE("%s: Got FW signal, retries = %x",
					__func__, 500-wait_limit);
	}
	/* let the target sleep again now that sync is complete */
	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
3371
/**
 * hif_pci_enable_bus(): enable the PCI bus and bring up the target
 * @ol_sc: soft_sc struct
 * @dev: device pointer (unused here; the pci_dev comes in via @bdev)
 * @bdev: bus dev pointer (actually a struct pci_dev *)
 * @bid: bus id pointer (actually a const struct pci_device_id *)
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * Enables PCI, identifies the HIF/target type from device and revision
 * IDs, attaches the register tables, wakes the target, maps BAR0 and
 * synchronizes with firmware. If target wakeup fails with -EAGAIN the
 * whole enable sequence is retried up to ATH_PCI_PROBE_RETRY_MAX times.
 *
 * Return: QDF_STATUS; 0 on success. NOTE(review): the err_enable_pci
 * path returns a negative errno (ret) rather than a QDF_STATUS code —
 * callers appear to treat non-zero as failure, but verify.
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			      struct device *dev, void *bdev,
			      const hif_bus_id *bid,
			      enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	/* NOTE(review): sc and hif_hdl are derived from ol_sc before the
	 * NULL check below; safe only while these macros are pure casts
	 * that do not dereference — confirm.
	 */
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
again:
	ret = hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
		       __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* PCI config offset 0x08: revision ID (low byte) */
	pci_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
						&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	/* Select register offset tables for this host/target combination */
	hif_register_tbl_attach(ol_sc, hif_type);
	target_register_tbl_attach(ol_sc, target_type);

	ret = hif_pci_probe_tgt_wakeup(sc);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
			   __func__, ret);
		/* -EAGAIN means the target may come up on a re-probe */
		if (ret == -EAGAIN)
			probe_again++;
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);

	tgt_info->target_type = target_type;

	sc->soc_pcie_bar0 = pci_resource_start(pdev, BAR_NUM);
	if (!sc->soc_pcie_bar0) {
		HIF_ERROR("%s: ERROR - cannot get CE BAR0", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}
	ol_sc->mem_pa = sc->soc_pcie_bar0;

	/* drvdata must not already be claimed for this pci_dev */
	BUG_ON(pci_get_drvdata(sc->pdev) != NULL);
	pci_set_drvdata(sc->pdev, sc);

	hif_target_sync(ol_sc);
	return 0;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* NOTE(review): max() clamps the back-off to a 100 ms floor,
		 * so the actual delays are 100, 100, 100, 160, ... ms; the
		 * historical "10, 40, 90, 100" comment matched 10*p*p without
		 * the floor. Confirm which behavior was intended.
		 */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return ret;
}
3485
/**
 * hif_pci_irq_enable() - re-enable interrupts after servicing a copy engine
 * @scn: hif_softc
 * @ce_id: copy engine id whose interrupt work has completed
 *
 * Clears @ce_id from the pending-IRQ summary under the irq_lock; once no
 * copy engine is pending, re-enables the legacy PCI line interrupts
 * (unless the target is in reset or the link is suspended). Finally
 * checks for a firmware crash that may have been missed while
 * interrupts were masked.
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
			(scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			/* Read back to flush/post the enable write over PCIe */
			hif_read32_mb(scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	/* Balances the Q_TARGET_ACCESS_BEGIN taken in hif_pci_irq_disable();
	 * skipped before hif init completes.
	 */
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}
/**
 * hif_pci_irq_disable() - begin target access for a copy engine interrupt
 * @scn: hif_softc
 * @ce_id: copy engine id (unused on this path)
 *
 * For Rome only the target needs to be woken here; target access is
 * maintained until interrupts are re-enabled by hif_pci_irq_enable(),
 * which issues the matching Q_TARGET_ACCESS_END.
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled */
	Q_TARGET_ACCESS_BEGIN(scn);
}
3536
Houston Hoffman9078a152015-11-02 16:15:02 -08003537#ifdef FEATURE_RUNTIME_PM
Houston Hoffmanf4607852015-12-17 17:14:40 -08003538
Komal Seelam5584a7c2016-02-24 19:22:48 +05303539void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
Houston Hoffmanf4607852015-12-17 17:14:40 -08003540{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303541 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffmanf4607852015-12-17 17:14:40 -08003542
Houston Hoffmanf4607852015-12-17 17:14:40 -08003543 if (NULL == sc)
3544 return;
3545
3546 sc->pm_stats.runtime_get++;
3547 pm_runtime_get_noresume(sc->dev);
3548}
3549
/**
 * hif_pm_runtime_get() - do a get opperation on the device
 * @hif_ctx: HIF opaque context
 *
 * A get opperation will prevent a runtime suspend untill a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *         otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int ret;
	int pm_state;

	if (NULL == scn) {
		HIF_ERROR("%s: Could not do runtime get, scn is null",
				__func__);
		return -EFAULT;
	}

	pm_state = qdf_atomic_read(&sc->pm_state);

	/* Device active (or runtime PM disabled): do a real get */
	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		sc->pm_stats.runtime_get++;
		ret = __hif_pm_runtime_get(sc->dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* Any failure: undo the get so the usage count stays balanced */
		if (ret)
			hif_pm_runtime_put(hif_ctx);

		if (ret && ret != -EINPROGRESS) {
			sc->pm_stats.runtime_get_err++;
			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
				__func__, qdf_atomic_read(&sc->pm_state), ret);
		}

		return ret;
	}

	/* Bus suspended/suspending: only request a resume (see header note);
	 * the resume-request result is intentionally not returned — callers
	 * always get -EAGAIN and are expected to retry later.
	 */
	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	ret = hif_pm_request_resume(sc->dev);

	return -EAGAIN;
}
3607
/**
 * hif_pm_runtime_put() - do a put opperation on the device
 * @hif_ctx: HIF opaque context
 *
 * A put opperation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped
 * This api will return failure if it would decrement the usage count below 0.
 *
 * return: QDF_STATUS_SUCCESS if the put is performed
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state, usage_count;
	unsigned long flags;
	char *error = NULL;	/* non-NULL marks an invalid put */

	if (NULL == scn) {
		HIF_ERROR("%s: Could not do runtime put, scn is null",
				__func__);
		return -EFAULT;
	}
	usage_count = atomic_read(&sc->dev->power.usage_count);

	if (usage_count == 1) {
		pm_state = qdf_atomic_read(&sc->pm_state);

		/* count of 1 with runtime PM disabled means the platform's
		 * own reference is the only one left — no get to match.
		 */
		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
			error = "Ignoring unexpected put when runtime pm is disabled";

	} else if (usage_count == 0) {
		error = "PUT Without a Get Operation";
	}

	if (error) {
		spin_lock_irqsave(&sc->runtime_lock, flags);
		hif_pci_runtime_pm_warn(sc, error);
		spin_unlock_irqrestore(&sc->runtime_lock, flags);
		return -EINVAL;
	}

	sc->pm_stats.runtime_put++;

	/* refresh the autosuspend timer, then drop the reference */
	hif_pm_runtime_mark_last_busy(sc->dev);
	hif_pm_runtime_put_auto(sc->dev);

	return 0;
}
3658
3659
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold hif_sc->runtime_lock. Takes a runtime PM reference,
 * marks @lock active and links it on the prevent_suspend list. A lock
 * that is already active is a no-op (see comment below).
 *
 * Return 0 if successful; -EINPROGRESS (resume/suspend in flight) is
 * tolerated, any other negative value is a real error (also logged).
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(hif_sc->dev);

	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't be do runtime_put here as in later point allow
	 * suspend gets called with the the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
				"Prevent Suspend Runtime PM Error");
	}

	/* Bookkeeping proceeds even on get error — the matching allow path
	 * undoes it (see comment above).
	 */
	hif_sc->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);

	hif_sc->pm_stats.prevent_suspend++;

	HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
		qdf_atomic_read(&hif_sc->pm_state), ret);

	return ret;
}
3713
/**
 * __hif_pm_runtime_allow_suspend() - release a previously taken suspend lock
 * @hif_sc: pci context
 * @lock: runtime_pm lock being released
 *
 * Caller must hold hif_sc->runtime_lock. Unlinks @lock from the
 * prevent_suspend list, deactivates it and drops the runtime PM
 * reference taken by __hif_pm_runtime_prevent_suspend(). No-op when
 * @lock is not active or nothing is currently prevented.
 *
 * Return: 0 on success/no-op, -EINVAL on an unbalanced allow, or the
 * result of the runtime put.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
		struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&hif_sc->dev->power.usage_count);

	/*
	 * During Driver unload, platform driver increments the usage
	 * count to prevent any runtime suspend getting called.
	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
	 * usage_count should be one. Ideally this shouldn't happen as
	 * context->active should be active for allow suspend to happen
	 * Handling this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
			&& usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
				"Allow without a prevent suspend");
		return -EINVAL;
	}

	list_del(&lock->list);

	hif_sc->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	/* refresh autosuspend timer before dropping the reference */
	hif_pm_runtime_mark_last_busy(hif_sc->dev);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);

	HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
		qdf_atomic_read(&hif_sc->pm_state), ret);

	hif_sc->pm_stats.allow_suspend++;
	return ret;
}
3759
/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * When runtime locks are acquired with a timeout (via
 * hif_pm_runtime_prevent_suspend_timeout()), this timer callback
 * releases every timeout-based lock once the shared timer expires.
 * Non-timeout locks (context->timeout == 0) are left untouched.
 */
static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
{
	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
	unsigned long flags;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	timer_expires = hif_sc->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		hif_sc->runtime_timer_expires = 0;
		/* _safe variant: allow_suspend unlinks entries as we walk */
		list_for_each_entry_safe(context, temp,
				&hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
}
3806
Komal Seelam5584a7c2016-02-24 19:22:48 +05303807int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
Houston Hoffman9078a152015-11-02 16:15:02 -08003808 struct hif_pm_runtime_lock *data)
3809{
Komal Seelam644263d2016-02-22 20:45:49 +05303810 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
3811 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08003812 struct hif_pm_runtime_lock *context = data;
3813 unsigned long flags;
3814
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003815 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08003816 return 0;
3817
3818 if (!context)
3819 return -EINVAL;
3820
3821 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3822 context->timeout = 0;
3823 __hif_pm_runtime_prevent_suspend(hif_sc, context);
3824 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3825
3826 return 0;
3827}
3828
/**
 * hif_pm_runtime_allow_suspend() - release a named runtime suspend lock
 * @ol_sc: HIF opaque context
 * @data: runtime PM lock previously acquired via
 *        hif_pm_runtime_prevent_suspend() or the _timeout variant
 *
 * Releases @data under the runtime spinlock and cancels the shared
 * timeout timer if no lock remains outstanding. A no-op when runtime
 * PM is not enabled.
 *
 * Return: 0 on success or when runtime PM is disabled; -EINVAL if @data
 * is NULL
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				struct hif_pm_runtime_lock *data)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_pm_runtime_lock *context = data;

	unsigned long flags;

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	__hif_pm_runtime_allow_suspend(hif_sc, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (hif_sc->prevent_suspend_cnt == 0 &&
			hif_sc->runtime_timer_expires > 0) {
		del_timer(&hif_sc->runtime_timer);
		hif_sc->runtime_timer_expires = 0;
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);

	return 0;
}
3864
/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);

	int ret = 0;
	unsigned long expires;
	unsigned long flags;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
				__func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* 0 is the "timer inactive" sentinel for runtime_timer_expires,
	 * so bump an exactly-zero jiffies value to 1.
	 */
	expires += !expires;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		mod_timer(&hif_sc->runtime_timer, expires);
		hif_sc->runtime_timer_expires = expires;
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);

	HIF_ERROR("%s: pm_state: %d delay: %dms ret: %d\n", __func__,
		qdf_atomic_read(&hif_sc->pm_state), delay, ret);

	return ret;
}
3943
3944/**
3945 * hif_runtime_lock_init() - API to initialize Runtime PM context
3946 * @name: Context name
3947 *
3948 * This API initalizes the Runtime PM context of the caller and
3949 * return the pointer.
3950 *
3951 * Return: void *
3952 */
3953struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
3954{
3955 struct hif_pm_runtime_lock *context;
3956
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303957 context = qdf_mem_malloc(sizeof(*context));
Houston Hoffman9078a152015-11-02 16:15:02 -08003958 if (!context) {
3959 HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
3960 __func__);
3961 return NULL;
3962 }
3963
3964 context->name = name ? name : "Default";
3965 return context;
3966}
3967
3968/**
3969 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
3970 * @data: Runtime PM context
3971 *
3972 * Return: void
3973 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303974void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
Komal Seelam644263d2016-02-22 20:45:49 +05303975 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08003976{
3977 unsigned long flags;
3978 struct hif_pm_runtime_lock *context = data;
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003979 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08003980
3981 if (!sc)
3982 return;
3983
3984 if (!context)
3985 return;
3986
3987 /*
3988 * Ensure to delete the context list entry and reduce the usage count
3989 * before freeing the context if context is active.
3990 */
3991 spin_lock_irqsave(&sc->runtime_lock, flags);
3992 __hif_pm_runtime_allow_suspend(sc, context);
3993 spin_unlock_irqrestore(&sc->runtime_lock, flags);
3994
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303995 qdf_mem_free(context);
Houston Hoffman9078a152015-11-02 16:15:02 -08003996}
3997
3998#endif /* FEATURE_RUNTIME_PM */