blob: ec8d72f3050517b689ab4d81f33ea54eee3af101 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
Prashanth Bhattadfcae6b2015-12-04 11:56:47 -08002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080028#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/if_arp.h>
32#ifdef CONFIG_PCI_MSM
33#include <linux/msm_pcie.h>
34#endif
35#include "hif_io32.h"
36#include "if_pci.h"
37#include "hif.h"
38#include "hif_main.h"
Houston Hoffman63777f22016-03-14 21:11:49 -070039#include "ce_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "ce_api.h"
41#include "ce_internal.h"
42#include "ce_reg.h"
Houston Hoffman108da402016-03-14 21:11:24 -070043#include "ce_bmi.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044#include "regtable.h"
Houston Hoffmanec93ab02016-05-03 20:09:55 -070045#include "hif_hw_version.h"
Houston Hoffman62aa58d2015-11-02 21:14:55 -080046#include <linux/debugfs.h>
47#include <linux/seq_file.h>
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053048#include "qdf_status.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053049#include "qdf_atomic.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080050#ifdef CONFIG_CNSS
51#include <net/cnss.h>
52#else
53#include "cnss_stub.h"
54#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080055#include "mp_dev.h"
56#include "hif_debug.h"
57
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080058#include "if_pci_internal.h"
59#include "icnss_stub.h"
60#include "ce_tasklet.h"
Houston Hoffmanf303f912016-03-14 21:11:42 -070061#include "targaddrs.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080062
Houston Hoffman32bc8eb2016-03-14 21:11:34 -070063#include "pci_api.h"
64
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080065/* Maximum ms timeout for host to wake up target */
66#define PCIE_WAKE_TIMEOUT 1000
67#define RAMDUMP_EVENT_TIMEOUT 2500
68
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080069/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
70 * PCIe data bus error
71 * As workaround for this issue - changing the reset sequence to
72 * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET
73 */
74#define CPU_WARM_RESET_WAR
75/*
76 * Top-level interrupt handler for all PCI interrupts from a Target.
77 * When a block of MSI interrupts is allocated, this top-level handler
78 * is not used; instead, we directly call the correct sub-handler.
79 */
80struct ce_irq_reg_table {
81 uint32_t irq_enable;
82 uint32_t irq_status;
83};
84
#if !defined(QCA_WIFI_3_0_ADRASTEA)
/* Notifying the Q6 is only meaningful on Adrastea targets; provide a
 * no-op stub for every other build so callers need no #ifdefs. */
static inline void cnss_intr_notify_q6(void)
{
}
#endif
90
#if !defined(QCA_WIFI_3_0_ADRASTEA)
/* Target shared memory only exists on Adrastea; stub returns NULL on
 * all other builds (callers must tolerate a NULL result). */
static inline void *cnss_get_target_smem(void)
{
	return NULL;
}
#endif
97
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080098#ifndef QCA_WIFI_3_0_ADRASTEA
99static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
100{
101 return;
102}
103#else
104void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
105{
Komal Seelam644263d2016-02-22 20:45:49 +0530106 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800107 unsigned int target_enable0, target_enable1;
108 unsigned int target_cause0, target_cause1;
109
110 target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
111 target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
112 target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
113 target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);
114
115 if ((target_enable0 & target_cause0) ||
116 (target_enable1 & target_cause1)) {
117 hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
118 hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);
119
120 if (scn->notice_send)
121 cnss_intr_notify_q6();
122 }
123}
124#endif
125
/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
 * @scn: scn
 *
 * Reads the CE interrupt summary under a target-access window.  If no CE
 * is pending, re-enables the group0 host interrupts (unless the target is
 * in reset or the link is suspended) and returns.  Otherwise dispatches
 * each pending CE's interrupt to its tasklet.
 *
 * NOTE: the kernel-doc previously named this "pci_dispatch_ce_irq";
 * corrected to match the actual function name.
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		/* nothing pending: re-arm group0 interrupts unless the
		 * target is resetting or the PCIe link is suspended */
		if ((scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			/* read back to flush the posted write before
			 * leaving the target-access window */
			hif_read32_mb(scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	} else {
		Q_TARGET_ACCESS_END(scn);
	}

	/* hand each pending CE to its tasklet; clear bits as we go so the
	 * loop can exit early once the summary is drained */
	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}
173
/**
 * hif_pci_interrupt_handler() - top-level legacy PCI interrupt handler
 * @irq: irq number that fired
 * @arg: hif_pci_softc context registered with request_irq
 *
 * For legacy (line) interrupts: disables and clears the pending cause,
 * detects a dead PCIe link (0xdeadbeef readback) and dumps config space,
 * then checks the firmware indicator for a pending SSR event.  An SSR
 * event is deferred to the wlan tasklet; everything else is dispatched
 * as CE interrupts.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
	volatile int tmp;
	uint16_t val;
	uint32_t bar0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			/* the line may have fired for the Q6, not the
			 * host; if no enabled host cause bit is set,
			 * route it onward and bail */
			host_enable = hif_read32_mb(sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise interrupt can not be really cleared */
		hif_write32_mb(sc->mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc->mem +
			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			      ADRASTEA_BU ?
			      (host_enable & host_cause) :
			      HOST_GROUP0_MASK);

		/* NOTE(review): magic register 0x2f100c and the >>1 shift
		 * are undocumented here — presumably an Adrastea cause
		 * acknowledge; confirm against the register map */
		if (ADRASTEA_BU)
			hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				/* bogus readback: SoC/PCIe link is dead;
				 * dump config space and key registers
				 * before asserting */
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
					  val);

				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
					  val);

				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
					  bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80008),
					  hif_read32_mb(sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80010),
					  hif_read32_mb(sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80018),
					  hif_read32_mb(sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		/* a pending FW event (and a readable indicator, i.e. not
		 * all-ones) means SSR: defer to the tasklet path below */
		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}
304
305static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
306{
307 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
308
Komal Seelam02cf2f82016-02-22 20:44:25 +0530309 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800310
311 return IRQ_HANDLED;
312}
313
Komal Seelam644263d2016-02-22 20:45:49 +0530314bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800315{
316 return 1; /* FIX THIS */
317}
318
/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Under keep_awake_lock: if a fake-sleep period is in progress, stop the
 * sleep timer and, when the target was never verified awake, write
 * SOC_WAKE_RESET to let it sleep immediately.  Compiled out (no-op) when
 * CONFIG_ATH_PCIE_MAX_PERF is set.
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			/* wake was never confirmed; release it now */
			hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				      PCIE_SOC_WAKE_ADDRESS,
				      PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	return;
}
#endif
353
/* Accessors for the PCIe-local register block: both helpers offset
 * @addr by PCIE_LOCAL_BASE_ADDRESS before the 32-bit MMIO access. */
#define A_PCIE_LOCAL_REG_READ(mem, addr) \
	hif_read32_mb((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
	hif_write32_mb(((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
361
#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped target register space (unused in this variant)
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Reads RTC_STATE; a target in recovery is always reported asleep.
 *
 * Return: true if the targets clocks are on
 */
bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
		+ RTC_STATE_ADDRESS);
	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
#endif
393
#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
/**
 * hif_pci_device_reset() - cold-reset the target via SOC_GLOBAL_RESET
 * @sc: hif pci context
 *
 * Sequence: force the SoC awake, assert SOC_GLOBAL_RESET (waiting for
 * cold-reset state), deassert it (waiting for the state to clear), then
 * release the wake.  Each wait polls up to ATH_PCI_RESET_WAIT_MAX ms.
 *
 * Return: void
 */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here. This form of reset
	 * is integral to correct operation. */

	if (!SOC_GLOBAL_RESET_ADDRESS) {
		return;
	}

	if (!mem) {
		return;
	}

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!
		    (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
		     RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	/* done: allow the SoC to sleep again */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
457
/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here. This form of reset is
	 * integral to correct operation. */

	if (!mem) {
		return;
	}

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb((mem +
		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS +
			       SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
		     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(mem +
		       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		       val);

	/* Reset CE */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb((mem +
		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		       val);
	/* read back — presumably to flush the posted write before the
	 * delay (matches the pattern used throughout this file) */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		     __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		     __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);

}
587
#ifndef QCA_WIFI_3_0
/**
 * hif_check_fw_reg() - check the firmware indicator for the helper bit
 * @hif_ctx: hif context
 *
 * Return: 0 when FW_IND_HELPER is set, 1 when it is not;
 *	ATH_ISR_NOSCHED / ATH_ISR_SCHED when the target-access window
 *	could not be opened / closed
 */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif
610
Komal Seelam5584a7c2016-02-24 19:22:48 +0530611int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800612{
Komal Seelam644263d2016-02-22 20:45:49 +0530613 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800614 uint16_t device_id;
615 uint32_t val;
616 uint16_t timeout_count = 0;
Komal Seelam02cf2f82016-02-22 20:44:25 +0530617 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800618
619 /* Check device ID from PCIe configuration space for link status */
620 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
621 if (device_id != sc->devid) {
622 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
623 __func__, device_id, sc->devid);
624 return -EACCES;
625 }
626
627 /* Check PCIe local register for bar/memory access */
628 val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
629 RTC_STATE_ADDRESS);
630 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
631
632 /* Try to wake up taget if it sleeps */
633 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
634 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
635 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
636 hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
637 PCIE_SOC_WAKE_ADDRESS));
638
639 /* Check if taget can be woken up */
640 while (!hif_targ_is_awake(scn, sc->mem)) {
641 if (timeout_count >= PCIE_WAKE_TIMEOUT) {
642 HIF_ERROR("%s: wake up timeout, %08x, %08x",
643 __func__,
644 hif_read32_mb(sc->mem +
645 PCIE_LOCAL_BASE_ADDRESS +
646 RTC_STATE_ADDRESS),
647 hif_read32_mb(sc->mem +
648 PCIE_LOCAL_BASE_ADDRESS +
649 PCIE_SOC_WAKE_ADDRESS));
650 return -EACCES;
651 }
652
653 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
654 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
655
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530656 qdf_mdelay(100);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800657 timeout_count += 100;
658 }
659
660 /* Check Power register for SoC internal bus issues */
661 val =
662 hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
663 SOC_POWER_REG_OFFSET);
664 HIF_INFO_MED("%s: Power register is %08x", __func__, val);
665
666 return 0;
667}
668
/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers. The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	/* CE wrapper debug mux selectors; indices <= 7 also expose six
	 * per-CE debug sub-selects (see loop below) */
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val =
		hif_read32_mb(mem + GPIO_BASE_ADDRESS +
			      WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
		       val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_CONTROL_OFFSET, val);

	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
		     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_INPUT_SEL_OFFSET),
		     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_CONTROL_OFFSET));

	HIF_INFO_MED("%s: Debug CE", __func__);
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
				    CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
			       CE_WRAPPER_DEBUG_OFFSET, val);

		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
			     __func__, wrapper_idx[i],
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   AMBA_DEBUG_BUS_OFFSET),
			     hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
					   CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val =
					hif_read32_mb(mem + ce_base +
						      CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
					       val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA */
				val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
						    WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
					     __func__, j,
					     hif_read32_mb(mem + ce_base +
							   CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val =
				hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					      WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			HIF_INFO_MED("%s: out: %x", __func__, val);
		}
	}

	HIF_INFO_MED("%s: Debug PCIe:", __func__);
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val =
			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				      AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
			       val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val =
			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				      WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET), val,
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}
803
/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed", __func__);

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	/* propagate the CE dump status instead of discarding it, per the
	 * documented contract (previously this always returned 0) */
	return status;
}
827
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800828/*
829 * Handler for a per-engine interrupt on a PARTICULAR CE.
830 * This is used in cases where each CE has a private
831 * MSI interrupt.
832 */
833static irqreturn_t ce_per_engine_handler(int irq, void *arg)
834{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800835 int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
836
837 /*
838 * NOTE: We are able to derive CE_id from irq because we
839 * use a one-to-one mapping for CE's 0..5.
840 * CE's 6 & 7 do not use interrupts at all.
841 *
842 * This mapping must be kept in sync with the mapping
843 * used by firmware.
844 */
845
Komal Seelam02cf2f82016-02-22 20:44:25 +0530846 ce_per_engine_service(arg, CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800847
848 return IRQ_HANDLED;
849}
850
851#ifdef CONFIG_SLUB_DEBUG_ON
852
/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	/* arg is expected to be the hif_pci_softc the work was created
	 * with; bail out (with an error log) when it is missing */
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
		return;
	}

	/* don't touch the tasklet once the driver is unloading */
	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}

	tasklet_schedule(&sc->intr_tq);
	return;
}
872
Komal Seelamaa72bb72016-02-01 17:22:50 +0530873/**
874 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
875 * work
876 * @sc: HIF PCI Context
877 *
878 * Return: void
879 */
880static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
881{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530882 qdf_create_work(0, &sc->reschedule_tasklet_work,
883 reschedule_tasklet_work_handler, NULL);
Komal Seelamaa72bb72016-02-01 17:22:50 +0530884}
885#else
886static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
887#endif /* CONFIG_SLUB_DEBUG_ON */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800888
889static void wlan_tasklet(unsigned long data)
890{
891 struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
Komal Seelam644263d2016-02-22 20:45:49 +0530892 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800893
894 if (scn->hif_init_done == false)
895 goto end;
896
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530897 if (qdf_atomic_read(&scn->link_suspended))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800898 goto end;
899
Houston Hoffman06bc4f52015-12-16 18:43:34 -0800900 if (!ADRASTEA_BU) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800901 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
Komal Seelam6ee55902016-04-11 17:11:07 +0530902 if (scn->target_status == TARGET_STATUS_RESET)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800903 goto end;
904 }
905
906end:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530907 qdf_atomic_set(&scn->tasklet_from_intr, 0);
908 qdf_atomic_dec(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800909}
910
Houston Hoffman62aa58d2015-11-02 21:14:55 -0800911#ifdef FEATURE_RUNTIME_PM
912#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
913 seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
914
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.  Dumps the device's
 * runtime-PM core state, this driver's counters, and every entry on
 * the prevent_suspend_list, then fires WARN_ON(1) to capture a
 * backtrace of the caller.
 *
 * Return: void
 */
void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	HIF_ERROR("%s: usage_count: %d, pm_state: %d, prevent_suspend_cnt: %d",
			msg, atomic_read(&sc->dev->power.usage_count),
			atomic_read(&sc->pm_state),
			sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
			sc->dev->power.runtime_status,
			sc->dev->power.runtime_error,
			sc->dev->power.disable_depth,
			sc->dev->power.autosuspend_delay);

	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
			sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
			sc->pm_stats.allow_suspend,
			sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
			sc->pm_stats.prevent_suspend_timeout,
			sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("Suspended: %u, resumed: %u count",
			sc->pm_stats.suspended,
			sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
			sc->pm_stats.suspend_err,
			sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	/* NOTE(review): unlike hif_pci_pm_runtime_debugfs_show(), this walk
	 * does not take sc->runtime_lock; presumably tolerable as best-effort
	 * debug output — confirm callers cannot race a concurrent list edit.
	 */
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}
967
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats;
 * prints the pm state machine, the runtime-PM core usage count, every
 * HIF_PCI_RUNTIME_PM_STATS counter, and the currently held
 * prevent-suspend wakeup sources.
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	/* indexed by HIF_PM_RUNTIME_STATE_* as stored in sc->pm_state */
	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
			"SUSPENDED"};
	unsigned int msecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires, flags;
	struct hif_pm_runtime_lock *ctx;

	seq_printf(s, "%30s: %s\n", "Runtime PM state",
			autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
			sc->pm_stats.last_resume_caller);

	/* how long we have been suspended, seconds.milliseconds */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(
				jiffies - sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
				msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
			atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
			sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	/* remaining time on the prevent-suspend timeout timer, if armed */
	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
				msecs_age / 1000, msecs_age % 1000);
	}

	/* runtime_lock protects prevent_suspend_list */
	spin_lock_irqsave(&sc->runtime_lock, flags);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_irqrestore(&sc->runtime_lock, flags);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_irqrestore(&sc->runtime_lock, flags);

	return 0;
}
1042#undef HIF_PCI_RUNTIME_PM_STATS
1043
/**
 * hif_pci_runtime_pm_open() - open the runtime pm debugfs stats file
 * @inode: debugfs inode; i_private carries the hif_pci_softc pointer
 *         installed by hif_runtime_pm_debugfs_create()
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}
1056
#ifdef WLAN_OPEN_SOURCE
/* seq_file-backed fops for the "cnss_runtime_pm" debugfs node */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner	  = THIS_MODULE,
	.open	  = hif_pci_runtime_pm_open,
	.release	= single_release,
	.read	   = seq_read,
	.llseek	 = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.  The pci
 * context is stashed in i_private for hif_pci_runtime_pm_open().
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					S_IRUSR, NULL, sc,
					&hif_pci_runtime_pm_fops);
}
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}
#else
/* debugfs disabled outside open-source builds: no-op implementations */
static inline void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
}
static inline void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
}
#endif
1096
Houston Hoffman9078a152015-11-02 16:15:02 -08001097static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001098
1099/**
1100 * hif_pm_runtime_start(): start the runtime pm
1101 * @sc: pci context
1102 *
1103 * After this call, runtime pm will be active.
1104 */
1105static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1106{
Komal Seelam644263d2016-02-22 20:45:49 +05301107 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001108 uint32_t mode = hif_get_conparam(ol_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001109
Houston Hoffmanb21a0532016-03-14 21:12:12 -07001110 if (!ol_sc->hif_config.enable_runtime_pm) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001111 HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1112 return;
1113 }
1114
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001115 if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001116 HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1117 __func__);
1118 return;
1119 }
1120
1121 setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1122 (unsigned long)sc);
1123
1124 HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1125 ol_sc->runtime_pm_delay);
1126
1127 cnss_runtime_init(sc->dev, ol_sc->runtime_pm_delay);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301128 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001129 hif_runtime_pm_debugfs_create(sc);
1130}
1131
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().  No-ops under the
 * same conditions for which hif_pm_runtime_start() declined to start
 * (ini disabled, FTM/EPPING mode).
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
		return;

	/* stop the cnss runtime machinery and force a resume */
	cnss_runtime_exit(sc->dev);
	cnss_pm_runtime_request(sc->dev, CNSS_PM_RUNTIME_RESUME);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	del_timer_sync(&sc->runtime_timer);
	/* doesn't wait for pending traffic, unlike cld-2.0 */
}
1159
1160/**
1161 * hif_pm_runtime_open(): initialize runtime pm
1162 * @sc: pci data structure
1163 *
1164 * Early initialization
1165 */
1166static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1167{
1168 spin_lock_init(&sc->runtime_lock);
1169
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301170 qdf_atomic_init(&sc->pm_state);
Houston Hoffmancceec342015-11-11 11:37:20 -08001171 sc->prevent_linkdown_lock =
1172 hif_runtime_lock_init("linkdown suspend disabled");
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301173 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001174 INIT_LIST_HEAD(&sc->prevent_suspend_list);
1175}
1176
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	/* anything other than exactly one vote means a leaked get/put;
	 * dump the runtime pm state with a backtrace before repairing
	 */
	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");

	/* ensure 1 and only 1 usage count so that when the wlan
	 * driver is re-insmodded runtime pm won't be
	 * disabled also ensures runtime pm doesn't get
	 * broken on by being less than 1.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}
1204
1205/**
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001206 * hif_pm_runtime_close(): close runtime pm
1207 * @sc: pci bus handle
1208 *
1209 * ensure runtime_pm is stopped before closing the driver
1210 */
1211static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1212{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301213 if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001214 return;
1215 else
1216 hif_pm_runtime_stop(sc);
Houston Hoffman20968292016-03-23 17:55:47 -07001217
1218 hif_pm_runtime_sanitize_on_exit(sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001219}
1220
1221
#else

/* FEATURE_RUNTIME_PM not compiled in: all runtime pm hooks are no-ops */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif
1229
1230/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001231 * hif_disable_power_gating() - disable HW power gating
1232 * @hif_ctx: hif context
1233 *
1234 * disables pcie L1 power states
1235 */
1236static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1237{
1238 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1239 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1240
1241 if (NULL == scn) {
1242 HIF_ERROR("%s: Could not disable ASPM scn is null",
1243 __func__);
1244 return;
1245 }
1246
1247 /* Disable ASPM when pkt log is enabled */
1248 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1249 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1250}
1251
1252/**
1253 * hif_enable_power_gating() - enable HW power gating
1254 * @hif_ctx: hif context
1255 *
1256 * enables pcie L1 power states
1257 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001258static void hif_enable_power_gating(struct hif_pci_softc *sc)
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001259{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001260 if (NULL == sc) {
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001261 HIF_ERROR("%s: Could not disable ASPM scn is null",
1262 __func__);
1263 return;
1264 }
1265
1266 /* Re-enable ASPM after firmware/OTP download is complete */
1267 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1268}
1269
1270/**
1271 * hif_enable_power_management() - enable power management
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001272 * @hif_ctx: hif context
1273 *
1274 * Currently only does runtime pm. Eventually this function could
1275 * consolidate other power state features such as only letting
1276 * the soc sleep after the driver finishes loading and re-enabling
1277 * aspm (hif_enable_power_gating).
1278 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001279void hif_pci_enable_power_management(struct hif_softc *hif_sc,
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001280 bool is_packet_log_enabled)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001281{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001282 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001283
Komal Seelam02cf2f82016-02-22 20:44:25 +05301284 if (pci_ctx == NULL) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001285 HIF_ERROR("%s, hif_ctx null", __func__);
1286 return;
1287 }
1288
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001289 hif_pm_runtime_start(pci_ctx);
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001290
1291 if (!is_packet_log_enabled)
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001292 hif_enable_power_gating(pci_ctx);
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001293
1294 if (!CONFIG_ATH_PCIE_MAX_PERF &&
1295 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
Houston Hoffman4ca03b62016-03-14 21:11:51 -07001296 if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001297 HIF_ERROR("%s, failed to set target to sleep",
1298 __func__);
1299 }
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001300}
1301
/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_stop(sc);
}
1321
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001322#define ATH_PCI_PROBE_RETRY_MAX 3
1323/**
1324 * hif_bus_open(): hif_bus_open
1325 * @scn: scn
1326 * @bus_type: bus type
1327 *
1328 * Return: n/a
1329 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001330QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001331{
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001332 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001333
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001334 hif_ctx->bus_type = bus_type;
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001335 hif_pm_runtime_open(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001336
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301337 qdf_spinlock_create(&sc->irq_lock);
Houston Hoffman8a13e5c2015-10-29 16:12:09 -07001338
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001339 return hif_ce_open(hif_ctx);
Houston Hoffman108da402016-03-14 21:11:24 -07001340}
1341
1342#ifdef BMI_RSP_POLLING
1343#define BMI_RSP_CB_REGISTER 0
1344#else
1345#define BMI_RSP_CB_REGISTER 1
1346#endif
1347
1348/**
1349 * hif_register_bmi_callbacks() - register bmi callbacks
1350 * @hif_sc: hif context
1351 *
1352 * Bmi phase uses different copy complete callbacks than mission mode.
1353 */
1354static void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
1355{
1356 struct HIF_CE_pipe_info *pipe_info;
1357 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1358
1359 /*
1360 * Initially, establish CE completion handlers for use with BMI.
1361 * These are overwritten with generic handlers after we exit BMI phase.
1362 */
1363 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1364 ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1365
1366 if (BMI_RSP_CB_REGISTER) {
1367 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1368 ce_recv_cb_register(
1369 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1370 }
1371}
1372
/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 * Implemented as a diag read-modify-write of the core control
 * register, setting the CPU interrupt mask bit.
 */
void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	/* read current core control value */
	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	/* write it back with the interrupt bit set */
	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}
1398
Houston Hoffman63777f22016-03-14 21:11:49 -07001399/**
1400 * soc_wake_reset() - allow the target to go to sleep
1401 * @scn: hif_softc
1402 *
1403 * Clear the force wake register. This is done by
1404 * hif_sleep_entry and cancel defered timer sleep.
1405 */
1406static void soc_wake_reset(struct hif_softc *scn)
1407{
1408 hif_write32_mb(scn->mem +
1409 PCIE_LOCAL_BASE_ADDRESS +
1410 PCIE_SOC_WAKE_ADDRESS,
1411 PCIE_SOC_WAKE_RESET);
1412}
1413
/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
 * allow the target to go to sleep and cancel the sleep timer.
 * otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	/* never put the target to sleep mid-recovery or mid-unload */
	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		/* time since the last force-awake critical section */
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			/* idle long enough: let the soc sleep, unless the
			 * bus link is suspended (then leave state as-is)
			 */
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			/* not idle long enough yet: re-arm and retry */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		/* still verified awake: check again next period */
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_start(&hif_state->sleep_timer,
			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
1457
Houston Hoffman854e67f2016-03-14 21:11:39 -07001458#define HIF_HIA_MAX_POLL_LOOP 1000000
1459#define HIF_HIA_POLLING_DELAY_MS 10
1460
/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area. The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Supplies the target-side CE pipe configuration and service map,
 * pcie config flags, (for pre-3.0 targets) the early-IRAM-bank
 * allocation, and finally sets HI_OPTION_EARLY_CFG_DONE and wakes
 * the target cpu.
 *
 * Return: 0 for success.
 */
int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	int target_ce_config_sz, target_service_to_ce_map_sz;
	/* NOTE(review): `static` makes this pointer shared across calls and
	 * devices — presumably safe because the config tables it points at
	 * are global, but confirm; a plain local would be safer.
	 */
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	HIF_TRACE("%s: E", __func__);

	hif_get_target_ce_config(&target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	/* integrated targets are configured via hif_wlan_enable instead */
	if (ADRASTEA_BU)
		return QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
	/* poll scratch register until fw publishes the host interest area
	 * (bit 0 clear while not yet ready)
	 */
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			/* NOTE(review): unreachable — the loop condition
			 * guarantees i <= HIF_HIA_MAX_POLL_LOOP here, so this
			 * progress log never fires; likely the intent was a
			 * periodic `i % 1000 == 0` log. Confirm and fix.
			 */
			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		HIF_ERROR("%s: hia polling timeout", __func__);
		return -EIO;
	}

	if (host_interest_area == 0) {
		HIF_ERROR("%s: host_interest_area = 0", __func__);
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
			hi_interconnect_state);

	/* NOTE(review): this value is unconditionally overwritten by the
	 * hif_hia_item_address() computation before first use below —
	 * confirm which address is correct for QCA_WIFI_3_0 targets.
	 */
	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	/* NOTE(review): recomputed (identically) in the early-alloc section
	 * below; one of the two computations is redundant.
	 */
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
			  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
			  __func__, interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pcie state addr is 0", __func__);
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			  pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
			  pipe_cfg_addr,
			  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
			__func__, pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
		goto done;
	}

	/* push the host's CE pipe configuration to the target */
	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
			(uint8_t *) target_ce_config,
			target_ce_config_sz);

	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			  pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			   svc_to_pipe_map),
			  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
		goto done;
	}

	/* push the service -> CE pipe mapping table */
	rv = hif_diag_write_mem(hif_hdl,
			svc_to_pipe_map,
			(uint8_t *) target_service_to_ce_map,
			target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			&pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			(uint8_t *) &pcie_config_flags,
			sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
							struct host_interest_s,
							hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
			&ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
			  CHIP_ID_ADDRESS |
			  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
		goto done;
	}
	/* bank count depends on the chip revision */
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:       /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:       /* ROME 2.1 */
		case 0x5:       /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:       /* ROME 3.0 */
		case 0x9:       /* ROME 3.1 */
		case 0xA:       /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:       /* ROME 1.0 */
		case 0x1:       /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				ealloc_targ_addr,
				ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
		goto done;
	}
#endif

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
					offsetof(
						struct host_interest_s,
						hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
			  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get option val (%d)", __func__, rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
			   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set option val (%d)", __func__, rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return rv;
}
1735
1736/**
Houston Hoffman108da402016-03-14 21:11:24 -07001737 * hif_bus_configure() - configure the pcie bus
1738 * @hif_sc: pointer to the hif context.
1739 *
1740 * return: 0 for success. nonzero for failure.
1741 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07001742int hif_pci_bus_configure(struct hif_softc *hif_sc)
Houston Hoffman108da402016-03-14 21:11:24 -07001743{
1744 int status = 0;
Houston Hoffman63777f22016-03-14 21:11:49 -07001745 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07001746
1747 hif_ce_prepare_config(hif_sc);
1748
Houston Hoffman63777f22016-03-14 21:11:49 -07001749 /* initialize sleep state adjust variables */
1750 hif_state->sleep_timer_init = true;
1751 hif_state->keep_awake_count = 0;
1752 hif_state->fake_sleep = false;
1753 hif_state->sleep_ticks = 0;
1754
1755 qdf_timer_init(NULL, &hif_state->sleep_timer,
1756 hif_sleep_entry, (void *)hif_state,
1757 QDF_TIMER_TYPE_WAKE_APPS);
1758 hif_state->sleep_timer_init = true;
1759
Houston Hoffman108da402016-03-14 21:11:24 -07001760 if (ADRASTEA_BU) {
1761 status = hif_wlan_enable(hif_sc);
1762
1763 if (status) {
1764 HIF_ERROR("%s: hif_wlan_enable error = %d",
1765 __func__, status);
Houston Hoffman63777f22016-03-14 21:11:49 -07001766 goto timer_free;
Houston Hoffman108da402016-03-14 21:11:24 -07001767 }
1768 }
1769
1770 A_TARGET_ACCESS_LIKELY(hif_sc);
Houston Hoffmanf7718622016-03-14 21:11:37 -07001771
1772 if (CONFIG_ATH_PCIE_MAX_PERF ||
1773 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
1774 /* Force AWAKE forever/till the driver is loaded */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07001775 if (hif_pci_target_sleep_state_adjust(hif_sc, false, true)
1776 < 0) {
Houston Hoffmanf7718622016-03-14 21:11:37 -07001777 status = -EACCES;
1778 goto disable_wlan;
1779 }
1780 }
1781
Houston Hoffman108da402016-03-14 21:11:24 -07001782 status = hif_config_ce(hif_sc);
1783 if (status)
1784 goto disable_wlan;
1785
1786 status = hif_set_hia(hif_sc);
1787 if (status)
1788 goto unconfig_ce;
1789
1790 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1791
1792 hif_register_bmi_callbacks(hif_sc);
1793
1794 status = hif_configure_irq(hif_sc);
1795 if (status < 0)
1796 goto unconfig_ce;
1797
1798 A_TARGET_ACCESS_UNLIKELY(hif_sc);
1799
1800 return status;
1801
1802unconfig_ce:
1803 hif_unconfig_ce(hif_sc);
1804disable_wlan:
1805 A_TARGET_ACCESS_UNLIKELY(hif_sc);
1806 if (ADRASTEA_BU)
1807 hif_wlan_disable(hif_sc);
1808
Houston Hoffman63777f22016-03-14 21:11:49 -07001809timer_free:
1810 qdf_timer_stop(&hif_state->sleep_timer);
1811 qdf_timer_free(&hif_state->sleep_timer);
1812 hif_state->sleep_timer_init = false;
1813
Houston Hoffman108da402016-03-14 21:11:24 -07001814 HIF_ERROR("%s: failed, status = %d", __func__, status);
1815 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001816}
1817
/**
 * hif_pci_close() - tear down the pci hif layer
 * @hif_sc: hif context
 *
 * Shuts down the runtime pm bookkeeping and then releases the
 * copy engine state.
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(hif_sc);

	hif_pm_runtime_close(pci_sc);
	hif_ce_close(hif_sc);
}
1829
1830#define BAR_NUM 0
1831
/**
 * hif_enable_pci() - enable the pci device and map its registers
 * @sc: pci hif context to populate
 * @pdev: pci device being probed
 * @id: matching entry from the pci device id table
 *
 * Verifies the config-space device id, enables the device, reserves and
 * maps the BAR0 MMIO region, programs the DMA masks and enables bus
 * mastering.  On success sc->mem/sc->pdev/sc->dev/sc->devid,
 * ol_sc->mem and sc->pci_enabled are populated.
 *
 * Return: 0 on success; -EIO on any pci setup failure (already-acquired
 * resources are released via the error labels at the bottom).
 */
int hif_enable_pci(struct hif_pci_softc *sc,
		   struct pci_dev *pdev,
		   const struct pci_device_id *id)
{
	void __iomem *mem;
	int ret = 0;
	uint16_t device_id;
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	/* re-read the id from config space; a mismatch with the id we
	 * probed on means the device is not reachable over the link */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != id->device) {
		HIF_ERROR(
		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
		   __func__, device_id, id->device);
		/* pci link is down, so returning with error code */
		return -EIO;
	}

	/* FIXME: temp. commenting out assign_resource
	 * call for dev_attach to work on 2.6.38 kernel
	 */
#if (!defined(__LINUX_ARM_ARCH__))
	if (pci_assign_resource(pdev, BAR_NUM)) {
		HIF_ERROR("%s: pci_assign_resource error", __func__);
		return -EIO;
	}
#endif
	if (pci_enable_device(pdev)) {
		HIF_ERROR("%s: pci_enable_device error",
			   __func__);
		return -EIO;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
		ret = -EIO;
		goto err_region;
	}
#ifdef CONFIG_ARM_LPAE
	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
	 * for 32 bits device also. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
		goto err_dma;
	}
#else
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
			  __func__);
		goto err_dma;
	}
#endif

	/* 0x188 is presumably the vendor-specific config offset for the
	 * L1ss states - TODO confirm against the macro definition */
	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		HIF_ERROR("%s: PCI iomap error", __func__);
		ret = -EIO;
		goto err_iomap;
	}
	sc->mem = mem;
	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	ol_sc->mem = mem;
	sc->pci_enabled = true;
	return ret;

err_iomap:
	pci_clear_master(pdev);
err_dma:
	pci_release_region(pdev, BAR_NUM);
err_region:
	pci_disable_device(pdev);
	return ret;
}
1928
1929void hif_disable_pci(struct hif_pci_softc *sc)
1930{
Komal Seelam644263d2016-02-22 20:45:49 +05301931 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1932
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001933 if (ol_sc == NULL) {
1934 HIF_ERROR("%s: ol_sc = NULL", __func__);
1935 return;
1936 }
1937 pci_set_drvdata(sc->pdev, NULL);
1938 hif_pci_device_reset(sc);
1939 pci_iounmap(sc->pdev, sc->mem);
1940 sc->mem = NULL;
1941 ol_sc->mem = NULL;
1942 pci_clear_master(sc->pdev);
1943 pci_release_region(sc->pdev, BAR_NUM);
1944 pci_disable_device(sc->pdev);
1945}
1946
/**
 * hif_pci_probe_tgt_wakeup() - wake the target during probe
 * @sc: pci hif context
 *
 * Asserts the soc wake register and polls until the target reports
 * awake (up to ~500 ms).  Optionally waits for BAR0 readiness and, on
 * pre-3.0 targets, checks the fw indicator for a stale initialized
 * state left behind by a previous driver instance.
 *
 * Return: 0 on success, -EAGAIN if the target does not wake in time or
 * was left in an unexpected (already initialized) state.
 */
int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
{
	int ret = 0;
	int targ_awake_limit = 500;
#ifndef QCA_WIFI_3_0
	uint32_t fw_indicator;
#endif
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/*
	 * Verify that the Target was started cleanly.*
	 * The case where this is most likely is with an AUX-powered
	 * Target and a Host in WoW mode. If the Host crashes,
	 * loses power, or is restarted (without unloading the driver)
	 * then the Target is left (aux) powered and running. On a
	 * subsequent driver load, the Target is in an unexpected state.
	 * We try to catch that here in order to reset the Target and
	 * retry the probe.
	 */
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	/* poll in 1 ms steps; targ_awake_limit bounds the wait */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (0 == targ_awake_limit) {
			HIF_ERROR("%s: target awake timeout", __func__);
			ret = -EAGAIN;
			goto end;
		}
		qdf_mdelay(1);
		targ_awake_limit--;
	}

#if PCIE_BAR0_READY_CHECKING
	{
		int wait_limit = 200;
		/* Synchronization point: wait the BAR0 is configured */
		while (wait_limit-- &&
		       !(hif_read32_mb(sc->mem +
				       PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_RDY_STATUS_ADDRESS) \
				& PCIE_SOC_RDY_STATUS_BAR_MASK)) {
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0 configuration,
			 takes one sec to wait BAR0 ready */
			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
				    __func__);
		}
	}
#endif

#ifndef QCA_WIFI_3_0
	/* read the fw indicator, then let the soc sleep again */
	fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	/* FW_IND_INITIALIZED already set here means a previous driver
	 * instance left the target running - ask the caller to retry */
	if (fw_indicator & FW_IND_INITIALIZED) {
		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
			  __func__);
		ret = -EAGAIN;
		goto end;
	}
#endif

end:
	return ret;
}
2014
/**
 * wlan_tasklet_msi() - bottom half for one MSI vector
 * @data: pointer to the hif_tasklet_entry assigned to this vector
 *
 * Dispatches by entry->id: HIF_MAX_TASKLET_NUM means the fw indicator
 * interrupt, ids below ce_count are serviced as copy engines, anything
 * else is logged as an error.
 *
 * NOTE(review): the active_tasklet_cnt accounting looks unbalanced -
 * the early "goto irq_handled" paths (init not done / link suspended)
 * decrement without having incremented, and the normal return path
 * leaves the count incremented. Verify against the consumers of
 * active_tasklet_cnt (e.g. hif_drain_tasklets) before relying on it.
 */
static void wlan_tasklet_msi(unsigned long data)
{
	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto irq_handled;

	if (qdf_atomic_read(&scn->link_suspended))
		goto irq_handled;

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (entry->id == HIF_MAX_TASKLET_NUM) {
		/* the last tasklet is for fw IRQ */
		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto irq_handled;
	} else if (entry->id < scn->ce_count) {
		ce_per_engine_service(scn, entry->id);
	} else {
		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
			__func__, entry->id);
	}
	return;

irq_handled:
	qdf_atomic_dec(&scn->active_tasklet_cnt);

}
2046
/**
 * hif_configure_msi() - attempt MSI interrupt configuration
 * @sc: pci hif context
 *
 * Tries multi-MSI first (one vector for the fw indicator plus one per
 * copy engine), then falls back to a single MSI vector.  On success
 * the host interrupt group is enabled and the soc wake state reset.
 *
 * NOTE(review): several constructs below look suspicious and should be
 * confirmed against the interrupt design before modifying:
 *  - "for (i = 0; i <= scn->ce_count; i++)" registers ce_count + 1 CE
 *    handlers (inclusive bound);
 *  - tasklet_init() is called repeatedly on the same sc->intr_tq, so
 *    only the last initialization takes effect;
 *  - the unconditional pci_enable_msi() block after the if/else chain
 *    duplicates the single-MSI path above and runs even when multi-MSI
 *    already succeeded.
 *
 * Return: 0 on success; -EINVAL, -EIO or a request_irq() error code on
 * failure (MSI is disabled again via err_intr).
 */
int hif_configure_msi(struct hif_pci_softc *sc)
{
	int ret = 0;
	int num_msi_desired;
	int rv = -1;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	HIF_TRACE("%s: E", __func__);

	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
	if (num_msi_desired < 1) {
		HIF_ERROR("%s: MSI is not configured", __func__);
		return -EINVAL;
	}

	if (num_msi_desired > 1) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
					  num_msi_desired);
#else
		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
#endif
	}
	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
		  __func__, num_msi_desired, rv);

	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
		int i;

		/* multi-MSI: last tasklet entry is reserved for fw IRQ */
		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
			(void *)sc;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
			HIF_MAX_TASKLET_NUM;
		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
			 (unsigned long)&sc->tasklet_entries[
			 HIF_MAX_TASKLET_NUM-1]);
		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
				  hif_pci_msi_fw_handler,
				  IRQF_SHARED, "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
		for (i = 0; i <= scn->ce_count; i++) {
			sc->tasklet_entries[i].hif_handler = (void *)sc;
			sc->tasklet_entries[i].id = i;
			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
				 (unsigned long)&sc->tasklet_entries[i]);
			ret = request_irq((sc->pdev->irq +
					   i + MSI_ASSIGN_CE_INITIAL),
					  ce_per_engine_handler, IRQF_SHARED,
					  "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else if (rv > 0) {
		HIF_TRACE("%s: use single msi", __func__);

		ret = pci_enable_msi(sc->pdev);
		if (ret < 0) {
			HIF_ERROR("%s: single MSI allocation failed",
				  __func__);
			/* Try for legacy PCI line interrupts */
			sc->num_msi_intrs = 0;
		} else {
			sc->num_msi_intrs = 1;
			tasklet_init(&sc->intr_tq,
				wlan_tasklet, (unsigned long)sc);
			ret = request_irq(sc->pdev->irq,
					  hif_pci_interrupt_handler,
					  IRQF_SHARED, "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else {
		sc->num_msi_intrs = 0;
		ret = -EIO;
		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
	}
	/* NOTE(review): this block duplicates the single-MSI path above
	 * and executes unconditionally - verify intended behavior */
	ret = pci_enable_msi(sc->pdev);
	if (ret < 0) {
		HIF_ERROR("%s: single MSI interrupt allocation failed",
			  __func__);
		/* Try for legacy PCI line interrupts */
		sc->num_msi_intrs = 0;
	} else {
		sc->num_msi_intrs = 1;
		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
		ret = request_irq(sc->pdev->irq,
				  hif_pci_interrupt_handler, IRQF_SHARED,
				  "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
	}

	if (ret == 0) {
		hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
			  PCIE_INTR_ENABLE_ADDRESS),
			  HOST_GROUP0_MASK);
		hif_write32_mb(sc->mem +
			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
			  PCIE_SOC_WAKE_RESET);
	}
	HIF_TRACE("%s: X, ret = %d", __func__, ret);

	return ret;

err_intr:
if (sc->num_msi_intrs >= 1)
	pci_disable_msi(sc->pdev);
	return ret;
}
2166
/**
 * hif_pci_configure_legacy_irq() - register the legacy pci line interrupt
 * @sc: pci hif context
 *
 * Used when MSI is unsupported or failed to configure.  Registers the
 * shared legacy interrupt handler, enables the host interrupt group
 * and resets the soc wake state.
 *
 * Return: 0 on success or the request_irq() error code.
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	HIF_TRACE("%s: E", __func__);

	/* does not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
		  PCIE_INTR_ENABLE_ADDRESS),
		  HOST_GROUP0_MASK);
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS,
		       PCIE_SOC_WAKE_RESET);
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "%s: X, ret = %d", __func__, ret);
	return ret;
}
2195
2196/**
2197 * hif_nointrs(): disable IRQ
2198 *
2199 * This function stops interrupt(s)
2200 *
Komal Seelam644263d2016-02-22 20:45:49 +05302201 * @scn: struct hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002202 *
2203 * Return: none
2204 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002205void hif_pci_nointrs(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002206{
2207 int i;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302208 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2209 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002210
2211 if (scn->request_irq_done == false)
2212 return;
2213 if (sc->num_msi_intrs > 0) {
2214 /* MSI interrupt(s) */
2215 for (i = 0; i < sc->num_msi_intrs; i++) {
2216 free_irq(sc->pdev->irq + i, sc);
2217 }
2218 sc->num_msi_intrs = 0;
2219 } else {
2220 /* Legacy PCI line interrupt */
2221 free_irq(sc->pdev->irq, sc);
2222 }
Komal Seelam02cf2f82016-02-22 20:44:25 +05302223 ce_unregister_irq(hif_state, 0xfff);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002224 scn->request_irq_done = false;
2225}
2226
2227/**
2228 * hif_disable_bus(): hif_disable_bus
2229 *
2230 * This function disables the bus
2231 *
2232 * @bdev: bus dev
2233 *
2234 * Return: none
2235 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002236void hif_pci_disable_bus(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002237{
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302238 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2239 struct pci_dev *pdev = sc->pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002240 void __iomem *mem;
2241
2242 /* Attach did not succeed, all resources have been
2243 * freed in error handler
2244 */
2245 if (!sc)
2246 return;
2247
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002248 if (ADRASTEA_BU) {
2249 hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2250 hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
2251 HOST_GROUP0_MASK);
2252 }
2253
2254 mem = (void __iomem *)sc->mem;
2255 if (mem) {
2256 pci_disable_msi(pdev);
2257 hif_dump_pipe_debug_count(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002258 if (scn->athdiag_procfs_inited) {
2259 athdiag_procfs_remove();
2260 scn->athdiag_procfs_inited = false;
2261 }
2262 pci_set_drvdata(pdev, NULL);
2263 pci_iounmap(pdev, mem);
2264 scn->mem = NULL;
2265 pci_release_region(pdev, BAR_NUM);
2266 pci_clear_master(pdev);
2267 pci_disable_device(pdev);
2268 }
2269 HIF_INFO("%s: X", __func__);
2270}
2271
2272#define OL_ATH_PCI_PM_CONTROL 0x44
2273
#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occuring
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * Votes with the runtime pm framework using the dedicated
 * prevent_linkdown_lock.  Should only be called as part of bus
 * prevent linkdown.
 */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_opaque_softc *hdl = GET_HIF_OPAQUE_HDL(scn);

	if (flag)
		hif_pm_runtime_prevent_suspend(hdl,
					pci_sc->prevent_linkdown_lock);
	else
		hif_pm_runtime_allow_suspend(hdl,
					pci_sc->prevent_linkdown_lock);
}
#else
/* runtime pm disabled: nothing to vote on */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
2299
#if defined(CONFIG_CNSS) && defined(CONFIG_PCI_MSM)
/**
 * hif_pci_prevent_linkdown() - allow or permit linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	const char *action = flag ? "disable" : "enable";

	HIF_ERROR("wlan: %s pcie power collapse", action);
	hif_runtime_prevent_linkdown(scn, flag);
	cnss_wlan_pm_control(flag);
}
#else
/* no platform driver hook available: only the runtime pm vote */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	const char *action = flag ? "disable" : "enable";

	HIF_ERROR("wlan: %s pcie power collapse", action);
	hif_runtime_prevent_linkdown(scn, flag);
}
#endif
2325
2326/**
2327 * hif_drain_tasklets(): wait untill no tasklet is pending
2328 * @scn: hif context
2329 *
2330 * Let running tasklets clear pending trafic.
2331 *
2332 * Return: 0 if no bottom half is in progress when it returns.
2333 * -EFAULT if it times out.
2334 */
Komal Seelam644263d2016-02-22 20:45:49 +05302335static inline int hif_drain_tasklets(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002336{
2337 uint32_t ce_drain_wait_cnt = 0;
2338
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302339 while (qdf_atomic_read(&scn->active_tasklet_cnt)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002340 if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
2341 HIF_ERROR("%s: CE still not done with access",
2342 __func__);
2343
2344 return -EFAULT;
2345 }
2346 HIF_INFO("%s: Waiting for CE to finish access", __func__);
2347 msleep(10);
2348 }
2349 return 0;
2350}
2351
2352/**
2353 * hif_bus_suspend_link_up() - suspend the bus
2354 *
2355 * Configures the pci irq line as a wakeup source.
2356 *
2357 * Return: 0 for success and non-zero for failure
2358 */
Komal Seelam644263d2016-02-22 20:45:49 +05302359static int hif_bus_suspend_link_up(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002360{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002361 struct pci_dev *pdev;
2362 int status;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302363 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002364
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302365 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002366 return -EFAULT;
2367
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302368 pdev = sc->pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002369
2370 status = hif_drain_tasklets(scn);
2371 if (status != 0)
2372 return status;
2373
2374 if (unlikely(enable_irq_wake(pdev->irq))) {
2375 HIF_ERROR("%s: Fail to enable wake IRQ!", __func__);
2376 return -EINVAL;
2377 }
2378
Houston Hoffmane61d4e12016-03-14 21:11:48 -07002379 hif_pci_cancel_deferred_target_sleep(scn);
2380
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002381 return 0;
2382}
2383
2384/**
2385 * hif_bus_resume_link_up() - hif bus resume API
2386 *
2387 * This function disables the wakeup source.
2388 *
2389 * Return: 0 for success and non-zero for failure
2390 */
Komal Seelam644263d2016-02-22 20:45:49 +05302391static int hif_bus_resume_link_up(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002392{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002393 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302394 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002395
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302396 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002397 return -EFAULT;
2398
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302399 pdev = sc->pdev;
2400
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002401 if (!pdev) {
2402 HIF_ERROR("%s: pci_dev is null", __func__);
2403 return -EFAULT;
2404 }
2405
2406 if (unlikely(disable_irq_wake(pdev->irq))) {
2407 HIF_ERROR("%s: Fail to disable wake IRQ!", __func__);
2408 return -EFAULT;
2409 }
2410
2411 return 0;
2412}
2413
2414/**
2415 * hif_bus_suspend_link_down() - suspend the bus
2416 *
2417 * Suspends the hif layer taking care of draining recieve queues and
2418 * shutting down copy engines if needed. Ensures opy engine interrupts
2419 * are disabled when it returns. Prevents register access after it
2420 * returns.
2421 *
2422 * Return: 0 for success and non-zero for failure
2423 */
Komal Seelam644263d2016-02-22 20:45:49 +05302424static int hif_bus_suspend_link_down(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002425{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002426 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302427 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002428 int status = 0;
2429
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302430 pdev = sc->pdev;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302431
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002432 disable_irq(pdev->irq);
2433
2434 status = hif_drain_tasklets(scn);
2435 if (status != 0) {
2436 enable_irq(pdev->irq);
2437 return status;
2438 }
2439
2440 /* Stop the HIF Sleep Timer */
Houston Hoffmane61d4e12016-03-14 21:11:48 -07002441 hif_pci_cancel_deferred_target_sleep(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002442
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302443 qdf_atomic_set(&scn->link_suspended, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002444
2445 return 0;
2446}
2447
2448/**
2449 * hif_bus_resume_link_down() - hif bus resume API
2450 *
2451 * This function resumes the bus reenabling interupts.
2452 *
2453 * Return: 0 for success and non-zero for failure
2454 */
Komal Seelam644263d2016-02-22 20:45:49 +05302455static int hif_bus_resume_link_down(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002456{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002457 struct pci_dev *pdev;
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302458 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002459
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302460 if (!sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002461 return -EFAULT;
2462
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302463 pdev = sc->pdev;
2464
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002465 if (!pdev) {
2466 HIF_ERROR("%s: pci_dev is null", __func__);
2467 return -EFAULT;
2468 }
2469
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302470 qdf_atomic_set(&scn->link_suspended, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002471
2472 enable_irq(pdev->irq);
2473
2474 return 0;
2475}
2476
/**
 * hif_pci_bus_suspend() - prepare hif for suspend
 * @scn: hif context
 *
 * Picks the link-down or link-up suspend path based on the link
 * suspend voting.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	bool link_can_go_down =
		hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn));

	return link_can_go_down ? hif_bus_suspend_link_down(scn) :
				  hif_bus_suspend_link_up(scn);
}
2491
/**
 * hif_pci_bus_resume() - prepare hif for resume
 * @scn: hif context
 *
 * Picks the link-down or link-up resume path based on the link
 * suspend voting.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	bool link_can_go_down =
		hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn));

	return link_can_go_down ? hif_bus_resume_link_down(scn) :
				  hif_bus_resume_link_up(scn);
}
2506
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002507#ifdef FEATURE_RUNTIME_PM
2508/**
2509 * __hif_runtime_pm_set_state(): utility function
2510 * @state: state to set
2511 *
2512 * indexes into the runtime pm state and sets it.
2513 */
Komal Seelam644263d2016-02-22 20:45:49 +05302514static void __hif_runtime_pm_set_state(struct hif_softc *scn,
Komal Seelamf8600682016-02-02 18:17:13 +05302515 enum hif_pm_runtime_state state)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002516{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002517 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002518
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002519 if (NULL == sc) {
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002520 HIF_ERROR("%s: HIF_CTX not initialized",
2521 __func__);
2522 return;
2523 }
2524
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302525 qdf_atomic_set(&sc->pm_state, state);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002526}
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002527
/**
 * hif_runtime_pm_set_state_inprogress() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm operation has started.
 */
static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
}
2537
/**
 * hif_runtime_pm_set_state_on() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on.
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
2547
/**
 * hif_runtime_pm_set_state_suspended() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed
 * successfully.
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
2557
Houston Hoffman692cc052015-11-10 18:42:47 -08002558/**
2559 * hif_log_runtime_suspend_success() - log a successful runtime suspend
2560 */
Komal Seelam644263d2016-02-22 20:45:49 +05302561static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002562{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002563 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002564 if (sc == NULL)
2565 return;
2566
2567 sc->pm_stats.suspended++;
2568 sc->pm_stats.suspend_jiffies = jiffies;
2569}
2570
2571/**
2572 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2573 *
2574 * log a failed runtime suspend
2575 * mark last busy to prevent immediate runtime suspend
2576 */
Komal Seelamf8600682016-02-02 18:17:13 +05302577static void hif_log_runtime_suspend_failure(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002578{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002579 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002580 if (sc == NULL)
2581 return;
2582
2583 sc->pm_stats.suspend_err++;
Houston Hoffman692cc052015-11-10 18:42:47 -08002584}
2585
/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: opaque hif context
 *
 * Log a successful runtime resume by bumping the resume counter.
 * Note: marking last busy is done by the caller
 * (hif_process_runtime_resume_success), not here.
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	if (sc == NULL)
		return;

	sc->pm_stats.resumed++;
}
2600
2601/**
2602 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2603 *
2604 * Record the failure.
2605 * mark last busy to delay a retry.
2606 * adjust the runtime_pm state.
2607 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302608void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002609{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002610 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2611 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002612
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002613 hif_log_runtime_suspend_failure(hif_ctx);
2614 if (hif_pci_sc != NULL)
2615 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2616 hif_runtime_pm_set_state_on(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002617}
2618
2619/**
2620 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2621 *
2622 * Makes sure that the pci link will be taken down by the suspend opperation.
2623 * If the hif layer is configured to leave the bus on, runtime suspend will
2624 * not save any power.
2625 *
2626 * Set the runtime suspend state to in progress.
2627 *
2628 * return -EINVAL if the bus won't go down. otherwise return 0
2629 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302630int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002631{
Komal Seelam644263d2016-02-22 20:45:49 +05302632 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2633
Komal Seelamf8600682016-02-02 18:17:13 +05302634 if (!hif_can_suspend_link(hif_ctx)) {
Houston Hoffman78467a82016-01-05 20:08:56 -08002635 HIF_ERROR("Runtime PM not supported for link up suspend");
2636 return -EINVAL;
2637 }
2638
Komal Seelam644263d2016-02-22 20:45:49 +05302639 hif_runtime_pm_set_state_inprogress(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002640 return 0;
2641}
2642
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: opaque hif context
 *
 * Move the runtime_pm state to suspended and record the success.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(hif_sc);
	hif_log_runtime_suspend_success(hif_sc);
}
2656
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: opaque hif context
 *
 * Update the runtime pm state to in-progress.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	hif_runtime_pm_set_state_inprogress(HIF_GET_SOFTC(hif_ctx));
}
2668
2669/**
2670 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2671 *
2672 * record the success.
2673 * adjust the runtime_pm state
2674 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302675void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002676{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002677 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2678 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002679
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002680 hif_log_runtime_resume_success(hif_ctx);
2681 if (hif_pci_sc != NULL)
2682 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2683 hif_runtime_pm_set_state_on(scn);
Houston Hoffman692cc052015-11-10 18:42:47 -08002684}
2685#endif
2686
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: opaque hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	return hif_pci_bus_suspend(HIF_GET_SOFTC(hif_ctx));
}
2696
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: opaque hif context
 *
 * Ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 */
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn || !scn->fastpath_mode_on)
		return;

	if (!Q_TARGET_ACCESS_BEGIN(scn))
		return;

	ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*war_ce_src_ring_write_idx_set */
	CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
				  ce_state->src_ring->write_index);
	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	Q_TARGET_ACCESS_END(scn);
}
#else
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
#endif
2729
2730
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: opaque hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int status = hif_pci_bus_resume(HIF_GET_SOFTC(hif_ctx));

	/* re-sync the fastpath write index that may have been skipped
	 * while runtime suspended
	 */
	hif_fastpath_resume(hif_ctx);

	return status;
}
2744
#if CONFIG_PCIE_64BIT_MSI
/**
 * hif_free_msi_ctx() - free the MSI "magic" DMA memory
 * @scn: hif context
 *
 * Frees the 4-byte coherent allocation used for the MSI magic handshake
 * and clears the bookkeeping pointers.
 */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
/* 64-bit MSI support disabled: nothing was allocated, nothing to free */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif
2762
Houston Hoffman8f239f62016-03-14 21:12:05 -07002763void hif_pci_disable_isr(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002764{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302765 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002766
Komal Seelam644263d2016-02-22 20:45:49 +05302767 hif_nointrs(scn);
Komal Seelamaa72bb72016-02-01 17:22:50 +05302768 hif_free_msi_ctx(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002769 /* Cancel the pending tasklet */
Komal Seelam644263d2016-02-22 20:45:49 +05302770 ce_tasklet_kill(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002771 tasklet_kill(&sc->intr_tq);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302772 qdf_atomic_set(&scn->active_tasklet_cnt, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002773}
2774
/**
 * hif_pci_reset_soc() - reset the target SoC
 * @hif_sc: hif context
 *
 * Uses the CPU warm reset sequence when the workaround is compiled in
 * and the target is AR9888 rev2; otherwise performs a full device reset.
 */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}
2795
#ifdef CONFIG_PCI_MSM
/**
 * hif_msm_pcie_debug_info() - dump msm pcie debug information
 * @sc: hif pci context
 *
 * Asks the msm pcie platform driver to log its debug info (options 1
 * and 2) for this device.
 */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
{
	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
}
#else
/* no-op stub when the msm pcie platform driver is not available;
 * fix: dropped the stray ';' after the body (invalid in strict ISO C)
 */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
#endif
2805
/**
 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
 * @sc: HIF PCIe Context
 *
 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
 *
 * Return: Failure to caller
 */
static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
{
	uint16_t val;
	uint32_t bar;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
	A_target_id_t pci_addr = scn->mem;

	HIF_ERROR("%s: keep_awake_count = %d",
			__func__, hif_state->keep_awake_count);

	/* dump the pci config space to aid debugging the wakeup failure */
	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);

	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);

	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);

	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_STATUS, &val);

	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);

	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);

	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);

	HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
						PCIE_SOC_WAKE_ADDRESS));

	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
							RTC_STATE_ADDRESS));

	HIF_ERROR("%s:error, wakeup target", __func__);
	hif_msm_pcie_debug_info(sc);

	/* without self recovery, a wakeup failure is fatal */
	if (!cfg->enable_self_recovery)
		QDF_BUG(0);

	scn->recovery = true;

	/* NOTE(review): cbk is assumed non-NULL here — confirm
	 * hif_get_callbacks_handle() cannot return NULL on this path
	 */
	if (cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);

	cnss_wlan_pci_link_down();
	return -EACCES;
}
2870
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002871/*
2872 * For now, we use simple on-demand sleep/wake.
2873 * Some possible improvements:
2874 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2875 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2876 * Careful, though, these functions may be used by
2877 * interrupt handlers ("atomic")
2878 * -Don't use host_reg_table for this code; instead use values directly
2879 * -Use a separate timer to track activity and allow Target to sleep only
2880 * if it hasn't done anything for a while; may even want to delay some
2881 * processing for a short while in order to "batch" (e.g.) transmit
2882 * requests with completion processing into "windows of up time". Costs
2883 * some performance, but improves power utilization.
2884 * -On some platforms, it might be possible to eliminate explicit
2885 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
2886 * recover from the failure by forcing the Target awake.
2887 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
2888 * overhead in some cases. Perhaps this makes more sense when
2889 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2890 * disabled.
2891 * -It is possible to compile this code out and simply force the Target
2892 * to remain awake. That would yield optimal performance at the cost of
2893 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2894 *
2895 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2896 */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to go to sleep when true; force it awake
 *            when false
 * @wait_for_it: when waking (sleep_ok == false), poll until the target
 *               is verified awake or the wakeup times out
 *
 * Adjust the target's sleep state by reference counting keep-awake
 * requests. A wakeup that exceeds PCIE_SLEEP_ADJUST_TIMEOUT triggers
 * hif_log_soc_wakeup_timeout().
 *
 * Return: 0 on success, -EACCES if the target is in recovery or the
 *         PCIe link is down
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
			      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
				__func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(pci_addr +
					      PCIE_LOCAL_BASE_ADDRESS +
					      PCIE_SOC_WAKE_ADDRESS,
					      PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				} else
				if (!hif_pci_targ_is_present
					    (scn, pci_addr)) {
					break;
				}

				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				/* back off the poll interval up to 50us */
				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			__func__,
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CLR_ADDRESS),
			hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003021
3022#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303023uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003024{
3025 uint32_t value;
3026 void *addr;
3027
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003028 addr = scn->mem + offset;
Houston Hoffman56e0d702016-05-05 17:48:06 -07003029 value = hif_read32_mb(addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003030
3031 {
3032 unsigned long irq_flags;
3033 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3034
3035 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3036 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3037 pcie_access_log[idx].is_write = false;
3038 pcie_access_log[idx].addr = addr;
3039 pcie_access_log[idx].value = value;
3040 pcie_access_log_seqnum++;
3041 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3042 }
3043
3044 return value;
3045}
3046
3047void
Komal Seelam644263d2016-02-22 20:45:49 +05303048hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003049{
3050 void *addr;
3051
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003052 addr = scn->mem + (offset);
3053 hif_write32_mb(addr, value);
3054
3055 {
3056 unsigned long irq_flags;
3057 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3058
3059 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3060 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3061 pcie_access_log[idx].is_write = true;
3062 pcie_access_log[idx].addr = addr;
3063 pcie_access_log[idx].value = value;
3064 pcie_access_log_seqnum++;
3065 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3066 }
3067}
3068
3069/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003070 * hif_target_dump_access_log() - dump access log
3071 *
3072 * dump access log
3073 *
3074 * Return: n/a
3075 */
3076void hif_target_dump_access_log(void)
3077{
3078 int idx, len, start_idx, cur_idx;
3079 unsigned long irq_flags;
3080
3081 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3082 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3083 len = PCIE_ACCESS_LOG_NUM;
3084 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3085 } else {
3086 len = pcie_access_log_seqnum;
3087 start_idx = 0;
3088 }
3089
3090 for (idx = 0; idx < len; idx++) {
3091 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3092 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%p val:%u.",
3093 __func__, idx,
3094 pcie_access_log[cur_idx].seqnum,
3095 pcie_access_log[cur_idx].is_write,
3096 pcie_access_log[cur_idx].addr,
3097 pcie_access_log[cur_idx].value);
3098 }
3099
3100 pcie_access_log_seqnum = 0;
3101 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3102}
3103#endif
3104
3105/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003106 * hif_configure_irq(): configure interrupt
3107 *
3108 * This function configures interrupt(s)
3109 *
3110 * @sc: PCIe control struct
3111 * @hif_hdl: struct HIF_CE_state
3112 *
3113 * Return: 0 - for success
3114 */
Komal Seelam644263d2016-02-22 20:45:49 +05303115int hif_configure_irq(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003116{
3117 int ret = 0;
Komal Seelam644263d2016-02-22 20:45:49 +05303118 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003119
3120 HIF_TRACE("%s: E", __func__);
3121
Komal Seelamaa72bb72016-02-01 17:22:50 +05303122 hif_init_reschedule_tasklet_work(sc);
3123
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003124 if (ENABLE_MSI) {
3125 ret = hif_configure_msi(sc);
3126 if (ret == 0)
3127 goto end;
3128 }
3129 /* MSI failed. Try legacy irq */
3130 ret = hif_pci_configure_legacy_irq(sc);
3131 if (ret < 0) {
3132 HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3133 __func__, ret);
3134 return ret;
3135 }
3136end:
3137 scn->request_irq_done = true;
3138 return 0;
3139}
3140
/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
void hif_target_sync(struct hif_softc *scn)
{
	hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS,
			PCIE_SOC_WAKE_V_MASK);
	/* busy-wait until the target reports awake; note there is no
	 * timeout on this loop
	 */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		/* poll the FW indicator for up to ~5s (500 * 10ms) */
		int wait_limit = 500;
		int fw_ind = 0;
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		while (1) {
			fw_ind = hif_read32_mb(scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

			qdf_mdelay(10);
		}
		if (wait_limit < 0)
			HIF_TRACE("%s: FW signal timed out",
					__func__);
		else
			HIF_TRACE("%s: Got FW signal, retries = %x",
					__func__, 500-wait_limit);
	}
	/* allow the target to sleep again */
	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
3191
/**
 * hif_pci_enable_bus(): enable bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer (a struct pci_dev * here)
 * @bid: bus id pointer (a struct pci_device_id * here)
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * Enables the pci device, determines the hif/target type from the
 * device and revision ids, wakes the target, maps BAR0 and waits for
 * the firmware to come up. Retries the pci enable with increasing
 * delays when the target asks for it (-EAGAIN).
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			  struct device *dev, void *bdev,
			  const hif_bus_id *bid,
			  enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
again:
	ret = hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
		       __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* 0x08 is the pci config offset of the revision id byte */
	pci_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
						&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	target_register_tbl_attach(ol_sc, target_type);

	ret = hif_pci_probe_tgt_wakeup(sc);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
			   __func__, ret);
		if (ret == -EAGAIN)
			probe_again++;
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);

	tgt_info->target_type = target_type;

	sc->soc_pcie_bar0 = pci_resource_start(pdev, BAR_NUM);
	if (!sc->soc_pcie_bar0) {
		HIF_ERROR("%s: ERROR - cannot get CE BAR0", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}
	ol_sc->mem_pa = sc->soc_pcie_bar0;

	BUG_ON(pci_get_drvdata(sc->pdev) != NULL);
	pci_set_drvdata(sc->pdev, sc);

	hif_target_sync(ol_sc);
	return 0;

err_tgtstate:
	/* NOTE(review): this path discards the specific errno in 'ret'
	 * (including the -EAGAIN retry hint) and always reports
	 * QDF_STATUS_E_ABORTED — confirm this is intentional
	 */
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* 10, 40, 90, 100, 100, ... */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return ret;
}
3305
/**
 * hif_pci_irq_enable() - ce_irq_enable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Clear @ce_id from the pending CE interrupt summary and, once no CE
 * interrupts remain pending, re-enable the legacy PCI line interrupts.
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
			(scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			/* read back of the same register — presumably to
			 * flush the posted write; confirm
			 */
			hif_read32_mb(scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}
/**
 * hif_pci_irq_disable() - ce_irq_disable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled */
	Q_TARGET_ACCESS_BEGIN(scn);
}
3356
Houston Hoffman9078a152015-11-02 16:15:02 -08003357#ifdef FEATURE_RUNTIME_PM
Houston Hoffmanf4607852015-12-17 17:14:40 -08003358
Komal Seelam5584a7c2016-02-24 19:22:48 +05303359void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
Houston Hoffmanf4607852015-12-17 17:14:40 -08003360{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303361 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffmanf4607852015-12-17 17:14:40 -08003362
Houston Hoffmanf4607852015-12-17 17:14:40 -08003363 if (NULL == sc)
3364 return;
3365
3366 sc->pm_stats.runtime_get++;
3367 pm_runtime_get_noresume(sc->dev);
3368}
3369
/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: opaque hif context
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * Return: success if the bus is up and a get has been issued,
 *         otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int ret;
	int pm_state;

	if (NULL == scn) {
		HIF_ERROR("%s: Could not do runtime get, scn is null",
				__func__);
		return -EFAULT;
	}

	pm_state = qdf_atomic_read(&sc->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		sc->pm_stats.runtime_get++;
		ret = __hif_pm_runtime_get(sc->dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* drop the reference we just took, since the get failed */
		if (ret)
			hif_pm_runtime_put(hif_ctx);

		if (ret && ret != -EINPROGRESS) {
			sc->pm_stats.runtime_get_err++;
			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
				__func__, qdf_atomic_read(&sc->pm_state), ret);
		}

		return ret;
	}

	/* bus is suspended (or suspending): only request a resume and tell
	 * the caller to retry later
	 */
	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	ret = hif_pm_request_resume(sc->dev);

	return -EAGAIN;
}
3427
3428/**
3429 * hif_pm_runtime_put() - do a put opperation on the device
3430 *
3431 * A put opperation will allow a runtime suspend after a corresponding
3432 * get was done. This api should be used when sending data.
3433 *
3434 * This api will return a failure if runtime pm is stopped
3435 * This api will return failure if it would decrement the usage count below 0.
3436 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303437 * return: QDF_STATUS_SUCCESS if the put is performed
Houston Hoffman9078a152015-11-02 16:15:02 -08003438 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303439int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
Houston Hoffman9078a152015-11-02 16:15:02 -08003440{
Komal Seelam644263d2016-02-22 20:45:49 +05303441 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05303442 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08003443 int pm_state, usage_count;
3444 unsigned long flags;
3445 char *error = NULL;
3446
3447 if (NULL == scn) {
3448 HIF_ERROR("%s: Could not do runtime put, scn is null",
3449 __func__);
3450 return -EFAULT;
3451 }
Houston Hoffman9078a152015-11-02 16:15:02 -08003452 usage_count = atomic_read(&sc->dev->power.usage_count);
3453
3454 if (usage_count == 1) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303455 pm_state = qdf_atomic_read(&sc->pm_state);
Houston Hoffman9078a152015-11-02 16:15:02 -08003456
3457 if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3458 error = "Ignoring unexpected put when runtime pm is disabled";
3459
3460 } else if (usage_count == 0) {
3461 error = "PUT Without a Get Operation";
3462 }
3463
3464 if (error) {
3465 spin_lock_irqsave(&sc->runtime_lock, flags);
3466 hif_pci_runtime_pm_warn(sc, error);
3467 spin_unlock_irqrestore(&sc->runtime_lock, flags);
3468 return -EINVAL;
3469 }
3470
3471 sc->pm_stats.runtime_put++;
3472
3473 hif_pm_runtime_mark_last_busy(sc->dev);
3474 hif_pm_runtime_put_auto(sc->dev);
3475
3476 return 0;
3477}
3478
3479
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Takes a runtime-pm get on the device, marks @lock active and queues it
 * on hif_sc->prevent_suspend_list so suspend stays blocked until the
 * matching __hif_pm_runtime_allow_suspend().
 * NOTE(review): every call site in this file holds hif_sc->runtime_lock
 * around this call — confirm before adding new callers.
 *
 * Return: 0 if successful, otherwise the (possibly -EINPROGRESS) value
 * from __hif_pm_runtime_get().
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(hif_sc->dev);

	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do a runtime_put here as at a later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
				"Prevent Suspend Runtime PM Error");
	}

	hif_sc->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);

	hif_sc->pm_stats.prevent_suspend++;

	HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
		qdf_atomic_read(&hif_sc->pm_state), ret);

	return ret;
}
3533
/**
 * __hif_pm_runtime_allow_suspend() - release a protocol runtime pm lock
 * @hif_sc: pci context
 * @lock: runtime_pm lock being released
 *
 * Counterpart of __hif_pm_runtime_prevent_suspend(): removes @lock from
 * prevent_suspend_list and drops the runtime-pm usage count the prevent
 * call took.
 * NOTE(review): call sites in this file hold hif_sc->runtime_lock around
 * this call — confirm before adding new callers.
 *
 * Return: 0 when @lock is not active, -EINVAL on a usage-count
 * inconsistency, otherwise the value of hif_pm_runtime_put_auto().
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
		struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	/* nothing to release when no lock currently prevents suspend */
	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&hif_sc->dev->power.usage_count);

	/*
	 * During Driver unload, platform driver increments the usage
	 * count to prevent any runtime suspend getting called.
	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
	 * usage_count should be one. Ideally this shouldn't happen as
	 * context->active should be active for allow suspend to happen
	 * Handling this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
			&& usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
				"Allow without a prevent suspend");
		return -EINVAL;
	}

	list_del(&lock->list);

	hif_sc->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(hif_sc->dev);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);

	HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
		qdf_atomic_read(&hif_sc->pm_state), ret);

	hif_sc->pm_stats.allow_suspend++;
	return ret;
}
3579
/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * If runtime locks are acquired with a timeout, this function releases
 * those locks when the shared timer expires.  Runs in timer (softirq)
 * context.
 */
static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
{
	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
	unsigned long flags;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	timer_expires = hif_sc->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                          CPU1 (timeout function)
	 * ----                          ----------------------
	 * spin_lock_irq
	 *                               timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                               spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		hif_sc->runtime_timer_expires = 0;
		/* release every lock that was taken with a timeout; locks
		 * taken without one (timeout == 0) are left in place
		 */
		list_for_each_entry_safe(context, temp,
				&hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
}
3626
Komal Seelam5584a7c2016-02-24 19:22:48 +05303627int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
Houston Hoffman9078a152015-11-02 16:15:02 -08003628 struct hif_pm_runtime_lock *data)
3629{
Komal Seelam644263d2016-02-22 20:45:49 +05303630 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
3631 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08003632 struct hif_pm_runtime_lock *context = data;
3633 unsigned long flags;
3634
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003635 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08003636 return 0;
3637
3638 if (!context)
3639 return -EINVAL;
3640
3641 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3642 context->timeout = 0;
3643 __hif_pm_runtime_prevent_suspend(hif_sc, context);
3644 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3645
3646 return 0;
3647}
3648
Komal Seelam5584a7c2016-02-24 19:22:48 +05303649int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
Komal Seelam644263d2016-02-22 20:45:49 +05303650 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08003651{
Komal Seelam644263d2016-02-22 20:45:49 +05303652 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
3653 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08003654 struct hif_pm_runtime_lock *context = data;
3655
3656 unsigned long flags;
3657
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003658 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08003659 return 0;
3660
3661 if (!context)
3662 return -EINVAL;
3663
3664 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3665
3666 __hif_pm_runtime_allow_suspend(hif_sc, context);
3667
3668 /* The list can be empty as well in cases where
3669 * we have one context in the list and the allow
3670 * suspend came before the timer expires and we delete
3671 * context above from the list.
3672 * When list is empty prevent_suspend count will be zero.
3673 */
3674 if (hif_sc->prevent_suspend_cnt == 0 &&
3675 hif_sc->runtime_timer_expires > 0) {
3676 del_timer(&hif_sc->runtime_timer);
3677 hif_sc->runtime_timer_expires = 0;
3678 }
3679
3680 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3681
3682 return 0;
3683}
3684
/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);

	int ret = 0;
	unsigned long expires;
	unsigned long flags;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
				__func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* expires == 0 means "timer not armed" elsewhere in this file,
	 * so skip over 0 when jiffies wraps
	 */
	expires += !expires;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		mod_timer(&hif_sc->runtime_timer, expires);
		hif_sc->runtime_timer_expires = expires;
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);

	HIF_ERROR("%s: pm_state: %d delay: %dms ret: %d\n", __func__,
		qdf_atomic_read(&hif_sc->pm_state), delay, ret);

	return ret;
}
3763
3764/**
3765 * hif_runtime_lock_init() - API to initialize Runtime PM context
3766 * @name: Context name
3767 *
3768 * This API initalizes the Runtime PM context of the caller and
3769 * return the pointer.
3770 *
3771 * Return: void *
3772 */
3773struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
3774{
3775 struct hif_pm_runtime_lock *context;
3776
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303777 context = qdf_mem_malloc(sizeof(*context));
Houston Hoffman9078a152015-11-02 16:15:02 -08003778 if (!context) {
3779 HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
3780 __func__);
3781 return NULL;
3782 }
3783
3784 context->name = name ? name : "Default";
3785 return context;
3786}
3787
3788/**
3789 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
3790 * @data: Runtime PM context
3791 *
3792 * Return: void
3793 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303794void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
Komal Seelam644263d2016-02-22 20:45:49 +05303795 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08003796{
3797 unsigned long flags;
3798 struct hif_pm_runtime_lock *context = data;
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003799 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08003800
3801 if (!sc)
3802 return;
3803
3804 if (!context)
3805 return;
3806
3807 /*
3808 * Ensure to delete the context list entry and reduce the usage count
3809 * before freeing the context if context is active.
3810 */
3811 spin_lock_irqsave(&sc->runtime_lock, flags);
3812 __hif_pm_runtime_allow_suspend(sc, context);
3813 spin_unlock_irqrestore(&sc->runtime_lock, flags);
3814
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303815 qdf_mem_free(context);
Houston Hoffman9078a152015-11-02 16:15:02 -08003816}
3817
3818#endif /* FEATURE_RUNTIME_PM */