blob: d5dd28c5edf6b8ef25ac7ce9015c07e50cefadd0 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
Prashanth Bhattadfcae6b2015-12-04 11:56:47 -08002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080028#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/if_arp.h>
32#ifdef CONFIG_PCI_MSM
33#include <linux/msm_pcie.h>
34#endif
35#include "hif_io32.h"
36#include "if_pci.h"
37#include "hif.h"
38#include "hif_main.h"
Houston Hoffman63777f22016-03-14 21:11:49 -070039#include "ce_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "ce_api.h"
41#include "ce_internal.h"
42#include "ce_reg.h"
Houston Hoffman108da402016-03-14 21:11:24 -070043#include "ce_bmi.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044#include "regtable.h"
Houston Hoffmanec93ab02016-05-03 20:09:55 -070045#include "hif_hw_version.h"
Houston Hoffman62aa58d2015-11-02 21:14:55 -080046#include <linux/debugfs.h>
47#include <linux/seq_file.h>
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053048#include "qdf_status.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053049#include "qdf_atomic.h"
Yuanyuan Liufd594c22016-04-25 13:59:19 -070050#include "pld_common.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080051#include "mp_dev.h"
52#include "hif_debug.h"
53
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080054#include "if_pci_internal.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080055#include "ce_tasklet.h"
Houston Hoffmanf303f912016-03-14 21:11:42 -070056#include "targaddrs.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080057
Houston Hoffman32bc8eb2016-03-14 21:11:34 -070058#include "pci_api.h"
Aravind Narasimhana1c7d6d2016-06-01 10:21:32 +053059#include "ahb_api.h"
Houston Hoffman32bc8eb2016-03-14 21:11:34 -070060
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080061/* Maximum ms timeout for host to wake up target */
62#define PCIE_WAKE_TIMEOUT 1000
63#define RAMDUMP_EVENT_TIMEOUT 2500
64
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080065/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
66 * PCIe data bus error
67 * As workaround for this issue - changing the reset sequence to
68 * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET
69 */
70#define CPU_WARM_RESET_WAR
Houston Hoffmanfb698ef2016-05-05 19:50:44 -070071
72#ifdef CONFIG_WIN
73extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
74#endif
75
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080076/*
77 * Top-level interrupt handler for all PCI interrupts from a Target.
78 * When a block of MSI interrupts is allocated, this top-level handler
79 * is not used; instead, we directly call the correct sub-handler.
80 */
/* Per-CE pair of interrupt enable/status register offsets, used when
 * walking the legacy (non-MSI) interrupt summary. */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};
85
#ifndef QCA_WIFI_3_0_ADRASTEA
/* Non-Adrastea targets share no interrupt line with Q6; stub it out. */
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	return;
}
#else
/**
 * hif_pci_route_adrastea_interrupt() - route a shared interrupt to Q6
 * @sc: pci softc
 *
 * On Adrastea the legacy interrupt line can be shared with the Q6
 * subsystem.  If the current cause matches an enabled Q6 interrupt,
 * mask both Q6 enable registers and notify Q6 through the platform
 * driver layer.
 */
void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		/* Mask the Q6 interrupts before handing the event off */
		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif
113
Houston Hoffmanf7bc3082016-10-17 19:52:55 -0700114
#ifdef QCA_WIFI_NAPIER_EMULATION
/**
 * napier_emu_ioremap() - map a PCI BAR for the Napier emulation platform
 * @dev: PCI device whose BAR is to be mapped
 * @bar: BAR index to map
 * @maxlen: maximum length the caller wants mapped; currently IGNORED —
 *          a fixed 0xD00000 bytes is mapped instead (presumably the
 *          emulator's register window size — TODO confirm)
 *
 * Emulation-only stand-in for pci_iomap().  Picks a cacheable or
 * uncached mapping based on the BAR's resource flags.
 *
 * Return: virtual address of the mapping, or NULL on failure.
 */
void __iomem *napier_emu_ioremap(struct pci_dev *dev,
			int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	/* NOTE(review): len is a hard-coded constant, so the "!len" test
	 * below can never fire; only "!start" is effective. */
	resource_size_t len = 0xD00000;
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;

	if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
		/* Cacheable mapping only for cacheable, non-I/O resources */
		if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
			return ioremap(start, len);
		else
			return ioremap_nocache(start, len);
	}

	return NULL;
}
#endif
136
137
/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
 * @scn: hif context
 *
 * Reads the copy-engine interrupt summary and dispatches the per-CE
 * tasklet for every CE whose summary bit is set.  If no CE work is
 * pending, re-arms the group-0 legacy interrupt (unless the target is
 * in reset or the PCIe link is suspended) and returns.
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	/* Ignore interrupts arriving before HIF is fully initialized */
	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		/* Nothing pending: re-enable the interrupt unless the
		 * target is resetting or the link is suspended. */
		if ((scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);

			/* read back to flush the posted write */
			hif_read32_mb(scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	} else {
		Q_TARGET_ACCESS_END(scn);
	}

	/* Service each CE whose summary bit is set, clearing as we go */
	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}
185
/**
 * hif_pci_interrupt_handler() - top-level legacy (INTx) interrupt handler
 * @irq: irq number that fired
 * @arg: struct hif_pci_softc registered with request_irq()
 *
 * Disables and clears the legacy PCI line interrupt, checks the FW
 * indicator register for a pending firmware/SSR event, and then either
 * schedules the SSR tasklet or dispatches the per-CE interrupts.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
	volatile int tmp;
	uint16_t val;
	uint32_t bar0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			/* Line may be shared with Q6 on Adrastea; route
			 * the interrupt there when it is not ours. */
			host_enable = hif_read32_mb(sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise interrupt can not be really cleared */
		hif_write32_mb(sc->mem +
			       (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc->mem +
			       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			       HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			/* NOTE(review): 0x2f100c is an undocumented magic
			 * register offset — confirm against Adrastea map */
			hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				/* Bogus read value: the PCIe link is likely
				 * dead — dump config space for diagnosis,
				 * then assert. */
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
					  val);

				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
					  val);

				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
					  bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80008),
					  hif_read32_mb(sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80010),
					  hif_read32_mb(sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc->mem + 0x80018),
					  hif_read32_mb(sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			/* A readable indicator with FW_IND_EVENT_PENDING
			 * set signals a firmware crash / SSR event. */
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		/* Firmware event: hand off to the SSR tasklet */
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}
316
317static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
318{
319 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
320
Komal Seelam02cf2f82016-02-22 20:44:25 +0530321 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800322
323 return IRQ_HANDLED;
324}
325
Komal Seelam644263d2016-02-22 20:45:49 +0530326bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800327{
328 return 1; /* FIX THIS */
329}
330
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +0530331int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
332{
333 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
334 int i = 0;
335
336 if (!irq || !size) {
337 return -EINVAL;
338 }
339
340 if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
341 irq[0] = sc->irq;
342 return 1;
343 }
344
345 if (sc->num_msi_intrs > size) {
346 qdf_print("Not enough space in irq buffer to return irqs\n");
347 return -EINVAL;
348 }
349
350 for (i = 0; i < sc->num_msi_intrs; i++) {
351 irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
352 }
353
354 return sc->num_msi_intrs;
355}
356
357
/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * If the deferred ("fake") sleep timer is armed, stops it and — when the
 * target was never verified awake — releases the pending wake request so
 * the SoC can go back to sleep.  No-op when built for max performance.
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			/* Wake never completed: drop the wake request */
			hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_WAKE_ADDRESS,
				       PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	return;
}
#endif
392
/* Accessors for registers in the PCIe-local register space, which lives
 * at offset PCIE_LOCAL_BASE_ADDRESS within the mapped BAR. */
#define A_PCIE_LOCAL_REG_READ(mem, addr) \
	hif_read32_mb((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
	hif_write32_mb(((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
400
#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped BAR base (unused)
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped BAR base
 *
 * Return: true if the targets clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	/* While recovering from a crash, treat the target as unreachable */
	if (scn->recovery)
		return false;
	val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
			    + RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif
432
#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
/**
 * hif_pci_device_reset() - cold-reset the target over PCIe
 * @sc: pci softc
 *
 * Wakes the target, asserts SOC_GLOBAL_RESET, waits for the cold-reset
 * state to latch, deasserts the reset, and finally releases the wake
 * request.  Each wait is bounded by ATH_PCI_RESET_WAIT_MAX ms.
 */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here. This form of reset
	 * is integral to correct operation. */

	if (!SOC_GLOBAL_RESET_ADDRESS) {
		return;
	}

	if (!mem) {
		return;
	}

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!
		    (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
		     RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	/* Reset done: release the wake request taken above */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
496
/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS -so Target CPU initializes FW
 * correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here. This form of reset is
	 * integral to correct operation. */

	if (!mem) {
		return;
	}

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val =
		hif_read32_mb(mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb((mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		/* NOTE(review): the value read here is never used; the read
		 * may only exist to latch/flush state — confirm before
		 * removing it. */
		fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS +
			       SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
		     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(mem +
		       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		       val);

	/* Reset CE */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb((mem +
			(RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		       val);
	/* read back to flush the posted write before delaying */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		     __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
		       val);
	val =
		hif_read32_mb(mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		     __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);

}
626
#ifndef QCA_WIFI_3_0
/**
 * hif_check_fw_reg() - check the FW indicator register for the helper bit
 * @hif_ctx: hif context
 *
 * Return: 0 if FW_IND_HELPER is set, 1 if it is clear;
 *         ATH_ISR_NOSCHED / ATH_ISR_SCHED when target access
 *         begin / end fails.
 */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif
649
Komal Seelam5584a7c2016-02-24 19:22:48 +0530650int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800651{
Komal Seelam644263d2016-02-22 20:45:49 +0530652 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800653 uint16_t device_id;
654 uint32_t val;
655 uint16_t timeout_count = 0;
Komal Seelam02cf2f82016-02-22 20:44:25 +0530656 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800657
658 /* Check device ID from PCIe configuration space for link status */
659 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
660 if (device_id != sc->devid) {
661 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
662 __func__, device_id, sc->devid);
663 return -EACCES;
664 }
665
666 /* Check PCIe local register for bar/memory access */
667 val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
668 RTC_STATE_ADDRESS);
669 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
670
671 /* Try to wake up taget if it sleeps */
672 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
673 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
674 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
675 hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
676 PCIE_SOC_WAKE_ADDRESS));
677
678 /* Check if taget can be woken up */
679 while (!hif_targ_is_awake(scn, sc->mem)) {
680 if (timeout_count >= PCIE_WAKE_TIMEOUT) {
681 HIF_ERROR("%s: wake up timeout, %08x, %08x",
682 __func__,
683 hif_read32_mb(sc->mem +
684 PCIE_LOCAL_BASE_ADDRESS +
685 RTC_STATE_ADDRESS),
686 hif_read32_mb(sc->mem +
687 PCIE_LOCAL_BASE_ADDRESS +
688 PCIE_SOC_WAKE_ADDRESS));
689 return -EACCES;
690 }
691
692 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
693 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
694
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530695 qdf_mdelay(100);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800696 timeout_count += 100;
697 }
698
699 /* Check Power register for SoC internal bus issues */
700 val =
701 hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
702 SOC_POWER_REG_OFFSET);
703 HIF_INFO_MED("%s: Power register is %08x", __func__, val);
704
705 return 0;
706}
707
/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers.  The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	/* CE wrapper debug-select indices to walk (see loop below) */
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val =
		hif_read32_mb(mem + GPIO_BASE_ADDRESS +
			      WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
		       val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_CONTROL_OFFSET, val);

	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
		     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_INPUT_SEL_OFFSET),
		     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_CONTROL_OFFSET));

	HIF_INFO_MED("%s: Debug CE", __func__);
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
				    CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
			       CE_WRAPPER_DEBUG_OFFSET, val);

		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
			     __func__, wrapper_idx[i],
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   AMBA_DEBUG_BUS_OFFSET),
			     hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
					   CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val =
					hif_read32_mb(mem + ce_base +
						      CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
					       val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA */
				val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
						    WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
					     __func__, j,
					     hif_read32_mb(mem + ce_base +
							   CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val =
				hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					      WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			HIF_INFO_MED("%s: out: %x", __func__, val);
		}
	}

	HIF_INFO_MED("%s: Debug PCIe:", __func__);
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val =
			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				      AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
			       val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val =
			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
				      WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET), val,
			     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}
842
/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps the copy engine registers and then the remaining
 * pci debug registers.
 *
 * Return: 0 for success or the error code from the CE register dump
 *         (previously the status was computed but 0 was always
 *         returned, contradicting the documented contract).
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed", __func__);

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	return status;
}
866
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800867/*
868 * Handler for a per-engine interrupt on a PARTICULAR CE.
869 * This is used in cases where each CE has a private
870 * MSI interrupt.
871 */
872static irqreturn_t ce_per_engine_handler(int irq, void *arg)
873{
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800874 int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
875
876 /*
877 * NOTE: We are able to derive CE_id from irq because we
878 * use a one-to-one mapping for CE's 0..5.
879 * CE's 6 & 7 do not use interrupts at all.
880 *
881 * This mapping must be kept in sync with the mapping
882 * used by firmware.
883 */
884
Komal Seelam02cf2f82016-02-22 20:44:25 +0530885 ce_per_engine_service(arg, CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800886
887 return IRQ_HANDLED;
888}
889
Venkateswara Swamy Bandaru814094e2016-11-11 15:24:27 +0530890#ifdef HIF_CONFIG_SLUB_DEBUG_ON
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800891
892/* worker thread to schedule wlan_tasklet in SLUB debug build */
Komal Seelamaa72bb72016-02-01 17:22:50 +0530893static void reschedule_tasklet_work_handler(void *arg)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800894{
Komal Seelamaa72bb72016-02-01 17:22:50 +0530895 struct hif_pci_softc *sc = arg;
Komal Seelam644263d2016-02-22 20:45:49 +0530896 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800897
Komal Seelamaa72bb72016-02-01 17:22:50 +0530898 if (!scn) {
Komal Seelam644263d2016-02-22 20:45:49 +0530899 HIF_ERROR("%s: hif_softc is NULL\n", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800900 return;
901 }
902
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800903 if (scn->hif_init_done == false) {
904 HIF_ERROR("%s: wlan driver is unloaded", __func__);
905 return;
906 }
907
908 tasklet_schedule(&sc->intr_tq);
909 return;
910}
911
Komal Seelamaa72bb72016-02-01 17:22:50 +0530912/**
913 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
914 * work
915 * @sc: HIF PCI Context
916 *
917 * Return: void
918 */
919static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
920{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530921 qdf_create_work(0, &sc->reschedule_tasklet_work,
922 reschedule_tasklet_work_handler, NULL);
Komal Seelamaa72bb72016-02-01 17:22:50 +0530923}
924#else
925static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
Venkateswara Swamy Bandaru814094e2016-11-11 15:24:27 +0530926#endif /* HIF_CONFIG_SLUB_DEBUG_ON */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800927
Houston Hoffman3db96a42016-05-05 19:54:39 -0700928void wlan_tasklet(unsigned long data)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800929{
930 struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
Komal Seelam644263d2016-02-22 20:45:49 +0530931 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800932
933 if (scn->hif_init_done == false)
934 goto end;
935
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530936 if (qdf_atomic_read(&scn->link_suspended))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800937 goto end;
938
Houston Hoffman06bc4f52015-12-16 18:43:34 -0800939 if (!ADRASTEA_BU) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800940 (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
Komal Seelam6ee55902016-04-11 17:11:07 +0530941 if (scn->target_status == TARGET_STATUS_RESET)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800942 goto end;
943 }
944
945end:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530946 qdf_atomic_set(&scn->tasklet_from_intr, 0);
947 qdf_atomic_dec(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800948}
949
Houston Hoffman62aa58d2015-11-02 21:14:55 -0800950#ifdef FEATURE_RUNTIME_PM
Sarada Prasanna Garnayakdf1c4b22016-10-06 11:03:45 +0530951static const char *hif_pm_runtime_state_to_string(uint32_t state)
952{
953 switch (state) {
954 case HIF_PM_RUNTIME_STATE_NONE:
955 return "INIT_STATE";
956 case HIF_PM_RUNTIME_STATE_ON:
957 return "ON";
958 case HIF_PM_RUNTIME_STATE_INPROGRESS:
959 return "INPROGRESS";
960 case HIF_PM_RUNTIME_STATE_SUSPENDED:
961 return "SUSPENDED";
962 default:
963 return "INVALID STATE";
964 }
965}
966
/* emit one named counter from @_sc->pm_stats into seq_file @_s */
#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off, then WARN so the
 * call site shows up in the kernel log.
 *
 * Return: void
 */
void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	/* kernel runtime-pm core's view of the device */
	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
			msg, atomic_read(&sc->dev->power.usage_count),
			hif_pm_runtime_state_to_string(
					atomic_read(&sc->pm_state)),
			sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
			sc->dev->power.runtime_status,
			sc->dev->power.runtime_error,
			sc->dev->power.disable_depth,
			sc->dev->power.autosuspend_delay);

	/* driver-maintained counters */
	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
			sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
			sc->pm_stats.allow_suspend,
			sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
			sc->pm_stats.prevent_suspend_timeout,
			sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("Suspended: %u, resumed: %u count",
			sc->pm_stats.suspended,
			sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
			sc->pm_stats.suspend_err,
			sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	/* NOTE(review): list walked without sc->runtime_lock held — looks
	 * like a best-effort debug dump; confirm callers tolerate races
	 */
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}
1022
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
		"SUSPENDED"};
	unsigned int msecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires, flags;
	struct hif_pm_runtime_lock *ctx;

	/* NOTE(review): assumes pm_state indexes autopm_state in the
	 * declaration order of the HIF_PM_RUNTIME_STATE_* values — confirm
	 */
	seq_printf(s, "%30s: %s\n", "Runtime PM state",
			autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
			sc->pm_stats.last_resume_caller);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(
				jiffies - sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
				msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
			atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
			sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	/* report time remaining on the prevent-suspend timer, if armed */
	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
				msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_irqsave(&sc->runtime_lock, flags);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_irqrestore(&sc->runtime_lock, flags);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_irqrestore(&sc->runtime_lock, flags);

	return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS
1098
/**
 * hif_pci_runtime_pm_open() - open the runtime pm stats debugfs file
 * @inode: debugfs inode; i_private carries the hif_pci_softc pointer
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}
1111
/* file operations for the runtime-pm debugfs entry (read-only seq file) */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner = THIS_MODULE,
	.open = hif_pci_runtime_pm_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};
1119
/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	/* owner-readable only; sc reaches the open callback via i_private */
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					S_IRUSR, NULL, sc,
					&hif_pci_runtime_pm_fops);
}
Komal Seelam81045d52016-09-26 17:08:34 +05301132
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}
Komal Seelam81045d52016-09-26 17:08:34 +05301143
1144static void hif_runtime_init(struct device *dev, int delay)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001145{
Komal Seelam81045d52016-09-26 17:08:34 +05301146 pm_runtime_set_autosuspend_delay(dev, delay);
1147 pm_runtime_use_autosuspend(dev);
1148 pm_runtime_allow(dev);
1149 pm_runtime_mark_last_busy(dev);
1150 pm_runtime_put_noidle(dev);
1151 pm_suspend_ignore_children(dev, true);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001152}
Komal Seelam81045d52016-09-26 17:08:34 +05301153
/**
 * hif_runtime_exit() - reclaim the device from runtime pm control
 * @dev: pci device
 *
 * Takes a usage count (so the device cannot runtime-suspend again,
 * without resuming it here) and marks it active.
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001159
Houston Hoffman9078a152015-11-02 16:15:02 -08001160static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001161
/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.  Does nothing when
 * runtime pm is disabled in the ini or in FTM/EPPING mode.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
				__func__);
		return;
	}

	/* timer backing the prevent-suspend-with-timeout API */
	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
			(unsigned long)sc);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
			ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}
1194
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	/* runtime pm was never started in FTM/EPPING; nothing to undo */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
		return;

	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	del_timer_sync(&sc->runtime_timer);
	/* doesn't wait for pending traffic unlike cld-2.0 */
}
1222
/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization: lock, state atomics, the always-present
 * linkdown prevent-suspend lock, and the prevent-suspend list.
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	/* standing lock used to block suspend while the link is down */
	sc->prevent_linkdown_lock =
		hif_runtime_lock_init("linkdown suspend disabled");
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}
1239
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	unsigned long flags;
	struct hif_pm_runtime_lock *ctx, *tmp;

	/* exactly one residual vote is the expected state; anything else
	 * dumps the full runtime-pm stats before cleanup
	 */
	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
	else
		return;

	spin_lock_irqsave(&sc->runtime_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		/* NOTE(review): lock dropped around deinit, so 'tmp' could
		 * in principle go away concurrently — presumably unload is
		 * single-threaded at this point; confirm
		 */
		spin_unlock_irqrestore(&sc->runtime_lock, flags);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
		spin_lock_irqsave(&sc->runtime_lock, flags);
	}
	spin_unlock_irqrestore(&sc->runtime_lock, flags);

	/* ensure 1 and only 1 usage count so that when the wlan
	 * driver is re-insmodded runtime pm won't be
	 * disabled also ensures runtime pm doesn't get
	 * broken on by being less than 1.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}
1280
Komal Seelam887b6482016-09-29 17:46:59 +05301281static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1282 struct hif_pm_runtime_lock *lock);
1283
1284/**
1285 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1286 * @sc: PCIe Context
1287 *
1288 * API is used to empty the runtime pm prevent suspend list.
1289 *
1290 * Return: void
1291 */
1292static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1293{
1294 unsigned long flags;
1295 struct hif_pm_runtime_lock *ctx, *tmp;
1296
1297 spin_lock_irqsave(&sc->runtime_lock, flags);
1298 list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1299 __hif_pm_runtime_allow_suspend(sc, ctx);
1300 }
1301 spin_unlock_irqrestore(&sc->runtime_lock, flags);
1302}
1303
Houston Hoffman20968292016-03-23 17:55:47 -07001304/**
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001305 * hif_pm_runtime_close(): close runtime pm
1306 * @sc: pci bus handle
1307 *
1308 * ensure runtime_pm is stopped before closing the driver
1309 */
1310static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1311{
Komal Seelam887b6482016-09-29 17:46:59 +05301312 struct hif_softc *scn = HIF_GET_SOFTC(sc);
1313
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301314 if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001315 return;
1316 else
1317 hif_pm_runtime_stop(sc);
Houston Hoffman20968292016-03-23 17:55:47 -07001318
Komal Seelam887b6482016-09-29 17:46:59 +05301319 hif_is_recovery_in_progress(scn) ?
1320 hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1321 hif_pm_runtime_sanitize_on_exit(sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001322}
#else
/* FEATURE_RUNTIME_PM disabled: the runtime-pm lifecycle hooks are no-ops */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif
1329
1330/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001331 * hif_disable_power_gating() - disable HW power gating
1332 * @hif_ctx: hif context
1333 *
1334 * disables pcie L1 power states
1335 */
1336static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1337{
1338 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1339 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1340
1341 if (NULL == scn) {
1342 HIF_ERROR("%s: Could not disable ASPM scn is null",
1343 __func__);
1344 return;
1345 }
1346
1347 /* Disable ASPM when pkt log is enabled */
1348 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1349 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1350}
1351
1352/**
1353 * hif_enable_power_gating() - enable HW power gating
1354 * @hif_ctx: hif context
1355 *
1356 * enables pcie L1 power states
1357 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001358static void hif_enable_power_gating(struct hif_pci_softc *sc)
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001359{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001360 if (NULL == sc) {
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001361 HIF_ERROR("%s: Could not disable ASPM scn is null",
1362 __func__);
1363 return;
1364 }
1365
1366 /* Re-enable ASPM after firmware/OTP download is complete */
1367 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1368}
1369
/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: when true, ASPM is left disabled (see
 *                         hif_disable_power_gating)
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);

	if (pci_ctx == NULL) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_start(pci_ctx);

	/* ASPM stays disabled while packet log is on */
	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			HIF_ERROR("%s, failed to set target to sleep",
				  __func__);
	}
}
1403
/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_stop(pci_ctx);
}
1423
Nirav Shahb70bd732016-05-25 14:31:51 +05301424void hif_pci_display_stats(struct hif_softc *hif_ctx)
1425{
1426 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1427
1428 if (pci_ctx == NULL) {
1429 HIF_ERROR("%s, hif_ctx null", __func__);
1430 return;
1431 }
1432 hif_display_ce_stats(&pci_ctx->ce_sc);
1433}
1434
1435void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1436{
1437 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1438
1439 if (pci_ctx == NULL) {
1440 HIF_ERROR("%s, hif_ctx null", __func__);
1441 return;
1442 }
1443 hif_clear_ce_stats(&pci_ctx->ce_sc);
1444}
1445
#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): pci-specific hif open
 * @hif_ctx: hif context
 * @bus_type: bus type (pci)
 *
 * Records the bus type, sets up runtime-pm bookkeeping and the irq
 * lock, then defers to the common copy-engine open.
 *
 * Return: status of hif_ce_open()
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_pm_runtime_open(sc);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}
1465
/* BMI responses are polled (no recv callback) when BMI_RSP_POLLING is set */
#ifdef BMI_RSP_POLLING
#define BMI_RSP_CB_REGISTER 0
#else
#define BMI_RSP_CB_REGISTER 1
#endif

/**
 * hif_register_bmi_callbacks() - register bmi callbacks
 * @hif_sc: hif context
 *
 * Bmi phase uses different copy complete callbacks than mission mode.
 */
void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
{
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
	ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);

	/* skip the response callback when responses are polled instead */
	if (BMI_RSP_CB_REGISTER) {
		pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
		ce_recv_cb_register(
			pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
	}
}
1496
1497/**
Houston Hoffman854e67f2016-03-14 21:11:39 -07001498 * hif_wake_target_cpu() - wake the target's cpu
1499 * @scn: hif context
1500 *
1501 * Send an interrupt to the device to wake up the Target CPU
1502 * so it has an opportunity to notice any changed state.
1503 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001504static void hif_wake_target_cpu(struct hif_softc *scn)
Houston Hoffman854e67f2016-03-14 21:11:39 -07001505{
1506 QDF_STATUS rv;
1507 uint32_t core_ctrl;
1508 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1509
1510 rv = hif_diag_read_access(hif_hdl,
1511 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1512 &core_ctrl);
1513 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1514 /* A_INUM_FIRMWARE interrupt to Target CPU */
1515 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1516
1517 rv = hif_diag_write_access(hif_hdl,
1518 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1519 core_ctrl);
1520 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1521}
1522
/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register. This is done by
 * hif_sleep_entry and cancel defered timer sleep.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	/* single register write: release the SOC force-wake request */
	hif_write32_mb(scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}
1537
/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
 * allow the target to go to sleep and cancel the sleep timer.
 * otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	/* no sleep management during recovery or unload */
	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		/* nobody is inside a force-awake critical section */
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!qdf_atomic_read(&scn->link_suspended)) {
				/* idle long enough: let the target sleep */
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			/* not idle long enough: re-arm and check later */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		/* still verified awake: defer the sleep decision */
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_start(&hif_state->sleep_timer,
			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
1581
/* poll budget and per-iteration delay (ms) used while waiting on
 * host interest area (HIA) setup — presumably consumed by hif_set_hia;
 * usage is outside this view
 */
#define HIF_HIA_MAX_POLL_LOOP 1000000
#define HIF_HIA_POLLING_DELAY_MS 10
1584
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001585#ifdef CONFIG_WIN
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001586static void hif_set_hia_extnd(struct hif_softc *scn)
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001587{
1588 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1589 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1590 uint32_t target_type = tgt_info->target_type;
1591
1592 HIF_TRACE("%s: E", __func__);
1593
1594 if ((target_type == TARGET_TYPE_AR900B) ||
1595 target_type == TARGET_TYPE_QCA9984 ||
1596 target_type == TARGET_TYPE_QCA9888) {
1597 /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1598 in RTC space */
1599 tgt_info->target_revision
1600 = CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem
1601 + CHIP_ID_ADDRESS));
1602 qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
1603 target_type, tgt_info->target_revision);
1604 }
1605
1606 {
1607 uint32_t flag2_value = 0;
1608 uint32_t flag2_targ_addr =
1609 host_interest_item_address(target_type,
1610 offsetof(struct host_interest_s, hi_skip_clock_init));
1611
1612 if ((ar900b_20_targ_clk != -1) &&
1613 (frac != -1) && (intval != -1)) {
1614 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1615 &flag2_value);
1616 qdf_print("\n Setting clk_override\n");
1617 flag2_value |= CLOCK_OVERRIDE;
1618
1619 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1620 flag2_value);
1621 qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
1622 } else {
1623 qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
1624 }
1625 }
1626
1627 if (target_type == TARGET_TYPE_AR900B
1628 || target_type == TARGET_TYPE_QCA9984
1629 || target_type == TARGET_TYPE_QCA9888) {
1630
1631 /* for AR9980_2.0, 300 mhz clock is used, right now we assume
1632 * this would be supplied through module parameters,
1633 * if not supplied assumed default or same behavior as 1.0.
1634 * Assume 1.0 clock can't be tuned, reset to defaults
1635 */
1636
1637 qdf_print(KERN_INFO"%s: setting the target pll frac %x intval %x\n",
1638 __func__, frac, intval);
1639
1640 /* do not touch frac, and int val, let them be default -1,
1641 * if desired, host can supply these through module params
1642 */
1643 if (frac != -1 || intval != -1) {
1644 uint32_t flag2_value = 0;
1645 uint32_t flag2_targ_addr;
1646
1647 flag2_targ_addr =
1648 host_interest_item_address(target_type,
1649 offsetof(struct host_interest_s,
1650 hi_clock_info));
1651 hif_diag_read_access(hif_hdl,
1652 flag2_targ_addr, &flag2_value);
1653 qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1654 flag2_value);
1655 hif_diag_write_access(hif_hdl, flag2_value, frac);
1656 qdf_print("\n INT Val %x Address %x\n",
1657 intval, flag2_value + 4);
1658 hif_diag_write_access(hif_hdl,
1659 flag2_value + 4, intval);
1660 } else {
1661 qdf_print(KERN_INFO"%s: no frac provided, skipping pre-configuring PLL\n",
1662 __func__);
1663 }
1664
1665 /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1666 if ((target_type == TARGET_TYPE_AR900B)
1667 && (tgt_info->target_revision == AR900B_REV_2)
1668 && ar900b_20_targ_clk != -1) {
1669 uint32_t flag2_value = 0;
1670 uint32_t flag2_targ_addr;
1671
1672 flag2_targ_addr
1673 = host_interest_item_address(target_type,
1674 offsetof(struct host_interest_s,
1675 hi_desired_cpu_speed_hz));
1676 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1677 &flag2_value);
1678 qdf_print("\n ====> hi_desired_cpu_speed_hz Address %x\n",
1679 flag2_value);
1680 hif_diag_write_access(hif_hdl, flag2_value,
1681 ar900b_20_targ_clk/*300000000u*/);
1682 } else if (target_type == TARGET_TYPE_QCA9888) {
1683 uint32_t flag2_targ_addr;
1684
1685 if (200000000u != qca9888_20_targ_clk) {
1686 qca9888_20_targ_clk = 300000000u;
1687 /* Setting the target clock speed to 300 mhz */
1688 }
1689
1690 flag2_targ_addr
1691 = host_interest_item_address(target_type,
1692 offsetof(struct host_interest_s,
1693 hi_desired_cpu_speed_hz));
1694 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1695 qca9888_20_targ_clk);
1696 } else {
1697 qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
1698 __func__);
1699 }
1700 } else {
1701 if (frac != -1 || intval != -1) {
1702 uint32_t flag2_value = 0;
1703 uint32_t flag2_targ_addr =
1704 host_interest_item_address(target_type,
1705 offsetof(struct host_interest_s,
1706 hi_clock_info));
1707 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1708 &flag2_value);
1709 qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1710 flag2_value);
1711 hif_diag_write_access(hif_hdl, flag2_value, frac);
1712 qdf_print("\n INT Val %x Address %x\n", intval,
1713 flag2_value + 4);
1714 hif_diag_write_access(hif_hdl, flag2_value + 4,
1715 intval);
1716 }
1717 }
1718}
1719
1720#else
1721
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001722static void hif_set_hia_extnd(struct hif_softc *scn)
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001723{
1724}
1725
1726#endif
1727
Houston Hoffman854e67f2016-03-14 21:11:39 -07001728/**
1729 * hif_set_hia() - fill out the host interest area
1730 * @scn: hif context
1731 *
1732 * This is replaced by hif_wlan_enable for integrated targets.
1733 * This fills out the host interest area. The firmware will
1734 * process these memory addresses when it is first brought out
1735 * of reset.
1736 *
1737 * Return: 0 for success.
1738 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001739static int hif_set_hia(struct hif_softc *scn)
Houston Hoffman854e67f2016-03-14 21:11:39 -07001740{
1741 QDF_STATUS rv;
1742 uint32_t interconnect_targ_addr = 0;
1743 uint32_t pcie_state_targ_addr = 0;
1744 uint32_t pipe_cfg_targ_addr = 0;
1745 uint32_t svc_to_pipe_map = 0;
1746 uint32_t pcie_config_flags = 0;
1747 uint32_t flag2_value = 0;
1748 uint32_t flag2_targ_addr = 0;
1749#ifdef QCA_WIFI_3_0
1750 uint32_t host_interest_area = 0;
1751 uint8_t i;
1752#else
1753 uint32_t ealloc_value = 0;
1754 uint32_t ealloc_targ_addr = 0;
1755 uint8_t banks_switched = 1;
1756 uint32_t chip_id;
1757#endif
1758 uint32_t pipe_cfg_addr;
1759 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1760 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1761 uint32_t target_type = tgt_info->target_type;
1762 int target_ce_config_sz, target_service_to_ce_map_sz;
1763 static struct CE_pipe_config *target_ce_config;
1764 struct service_to_pipe *target_service_to_ce_map;
1765
1766 HIF_TRACE("%s: E", __func__);
1767
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301768 hif_get_target_ce_config(scn,
1769 &target_ce_config, &target_ce_config_sz,
Houston Hoffman854e67f2016-03-14 21:11:39 -07001770 &target_service_to_ce_map,
1771 &target_service_to_ce_map_sz,
1772 NULL, NULL);
1773
1774 if (ADRASTEA_BU)
1775 return QDF_STATUS_SUCCESS;
1776
1777#ifdef QCA_WIFI_3_0
1778 i = 0;
1779 while (i < HIF_HIA_MAX_POLL_LOOP) {
1780 host_interest_area = hif_read32_mb(scn->mem +
1781 A_SOC_CORE_SCRATCH_0_ADDRESS);
1782 if ((host_interest_area & 0x01) == 0) {
1783 qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1784 host_interest_area = 0;
1785 i++;
1786 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1787 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1788 } else {
1789 host_interest_area &= (~0x01);
1790 hif_write32_mb(scn->mem + 0x113014, 0);
1791 break;
1792 }
1793 }
1794
1795 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1796 HIF_ERROR("%s: hia polling timeout", __func__);
1797 return -EIO;
1798 }
1799
1800 if (host_interest_area == 0) {
1801 HIF_ERROR("%s: host_interest_area = 0", __func__);
1802 return -EIO;
1803 }
1804
1805 interconnect_targ_addr = host_interest_area +
1806 offsetof(struct host_interest_area_t,
1807 hi_interconnect_state);
1808
1809 flag2_targ_addr = host_interest_area +
1810 offsetof(struct host_interest_area_t, hi_option_flag2);
1811
1812#else
1813 interconnect_targ_addr = hif_hia_item_address(target_type,
1814 offsetof(struct host_interest_s, hi_interconnect_state));
1815 ealloc_targ_addr = hif_hia_item_address(target_type,
1816 offsetof(struct host_interest_s, hi_early_alloc));
1817 flag2_targ_addr = hif_hia_item_address(target_type,
1818 offsetof(struct host_interest_s, hi_option_flag2));
1819#endif
1820 /* Supply Target-side CE configuration */
1821 rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1822 &pcie_state_targ_addr);
1823 if (rv != QDF_STATUS_SUCCESS) {
1824 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1825 __func__, interconnect_targ_addr, rv);
1826 goto done;
1827 }
1828 if (pcie_state_targ_addr == 0) {
1829 rv = QDF_STATUS_E_FAILURE;
1830 HIF_ERROR("%s: pcie state addr is 0", __func__);
1831 goto done;
1832 }
1833 pipe_cfg_addr = pcie_state_targ_addr +
1834 offsetof(struct pcie_state_s,
1835 pipe_cfg_addr);
1836 rv = hif_diag_read_access(hif_hdl,
1837 pipe_cfg_addr,
1838 &pipe_cfg_targ_addr);
1839 if (rv != QDF_STATUS_SUCCESS) {
1840 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1841 __func__, pipe_cfg_addr, rv);
1842 goto done;
1843 }
1844 if (pipe_cfg_targ_addr == 0) {
1845 rv = QDF_STATUS_E_FAILURE;
1846 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1847 goto done;
1848 }
1849
1850 rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1851 (uint8_t *) target_ce_config,
1852 target_ce_config_sz);
1853
1854 if (rv != QDF_STATUS_SUCCESS) {
1855 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1856 goto done;
1857 }
1858
1859 rv = hif_diag_read_access(hif_hdl,
1860 pcie_state_targ_addr +
1861 offsetof(struct pcie_state_s,
1862 svc_to_pipe_map),
1863 &svc_to_pipe_map);
1864 if (rv != QDF_STATUS_SUCCESS) {
1865 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1866 goto done;
1867 }
1868 if (svc_to_pipe_map == 0) {
1869 rv = QDF_STATUS_E_FAILURE;
1870 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1871 goto done;
1872 }
1873
1874 rv = hif_diag_write_mem(hif_hdl,
1875 svc_to_pipe_map,
1876 (uint8_t *) target_service_to_ce_map,
1877 target_service_to_ce_map_sz);
1878 if (rv != QDF_STATUS_SUCCESS) {
1879 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1880 goto done;
1881 }
1882
1883 rv = hif_diag_read_access(hif_hdl,
1884 pcie_state_targ_addr +
1885 offsetof(struct pcie_state_s,
1886 config_flags),
1887 &pcie_config_flags);
1888 if (rv != QDF_STATUS_SUCCESS) {
1889 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1890 goto done;
1891 }
1892#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1893 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1894#else
1895 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1896#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1897 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1898#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1899 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1900#endif
1901 rv = hif_diag_write_mem(hif_hdl,
1902 pcie_state_targ_addr +
1903 offsetof(struct pcie_state_s,
1904 config_flags),
1905 (uint8_t *) &pcie_config_flags,
1906 sizeof(pcie_config_flags));
1907 if (rv != QDF_STATUS_SUCCESS) {
1908 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1909 goto done;
1910 }
1911
1912#ifndef QCA_WIFI_3_0
1913 /* configure early allocation */
1914 ealloc_targ_addr = hif_hia_item_address(target_type,
1915 offsetof(
1916 struct host_interest_s,
1917 hi_early_alloc));
1918
1919 rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1920 &ealloc_value);
1921 if (rv != QDF_STATUS_SUCCESS) {
1922 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1923 goto done;
1924 }
1925
1926 /* 1 bank is switched to IRAM, except ROME 1.0 */
1927 ealloc_value |=
1928 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1929 HI_EARLY_ALLOC_MAGIC_MASK);
1930
1931 rv = hif_diag_read_access(hif_hdl,
1932 CHIP_ID_ADDRESS |
1933 RTC_SOC_BASE_ADDRESS, &chip_id);
1934 if (rv != QDF_STATUS_SUCCESS) {
1935 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1936 goto done;
1937 }
1938 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1939 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1940 switch (CHIP_ID_REVISION_GET(chip_id)) {
1941 case 0x2: /* ROME 1.3 */
1942 /* 2 banks are switched to IRAM */
1943 banks_switched = 2;
1944 break;
1945 case 0x4: /* ROME 2.1 */
1946 case 0x5: /* ROME 2.2 */
1947 banks_switched = 6;
1948 break;
1949 case 0x8: /* ROME 3.0 */
1950 case 0x9: /* ROME 3.1 */
1951 case 0xA: /* ROME 3.2 */
1952 banks_switched = 9;
1953 break;
1954 case 0x0: /* ROME 1.0 */
1955 case 0x1: /* ROME 1.1 */
1956 default:
1957 /* 3 banks are switched to IRAM */
1958 banks_switched = 3;
1959 break;
1960 }
1961 }
1962
1963 ealloc_value |=
1964 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1965 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1966
1967 rv = hif_diag_write_access(hif_hdl,
1968 ealloc_targ_addr,
1969 ealloc_value);
1970 if (rv != QDF_STATUS_SUCCESS) {
1971 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1972 goto done;
1973 }
1974#endif
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001975 if ((target_type == TARGET_TYPE_AR900B)
1976 || (target_type == TARGET_TYPE_QCA9984)
1977 || (target_type == TARGET_TYPE_QCA9888)
Aravind Narasimhane79befa2016-06-24 12:03:15 +05301978 || (target_type == TARGET_TYPE_AR9888)) {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001979 hif_set_hia_extnd(scn);
1980 }
Houston Hoffman854e67f2016-03-14 21:11:39 -07001981
1982 /* Tell Target to proceed with initialization */
1983 flag2_targ_addr = hif_hia_item_address(target_type,
1984 offsetof(
1985 struct host_interest_s,
1986 hi_option_flag2));
1987
1988 rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1989 &flag2_value);
1990 if (rv != QDF_STATUS_SUCCESS) {
1991 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1992 goto done;
1993 }
1994
1995 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1996 rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1997 flag2_value);
1998 if (rv != QDF_STATUS_SUCCESS) {
1999 HIF_ERROR("%s: set option val (%d)", __func__, rv);
2000 goto done;
2001 }
2002
2003 hif_wake_target_cpu(scn);
2004
2005done:
2006
2007 return rv;
2008}
2009
2010/**
Houston Hoffman108da402016-03-14 21:11:24 -07002011 * hif_bus_configure() - configure the pcie bus
2012 * @hif_sc: pointer to the hif context.
2013 *
2014 * return: 0 for success. nonzero for failure.
2015 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002016int hif_pci_bus_configure(struct hif_softc *hif_sc)
Houston Hoffman108da402016-03-14 21:11:24 -07002017{
2018 int status = 0;
Houston Hoffman63777f22016-03-14 21:11:49 -07002019 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302020 struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002021
Houston Hoffman108da402016-03-14 21:11:24 -07002022 hif_ce_prepare_config(hif_sc);
2023
Houston Hoffman63777f22016-03-14 21:11:49 -07002024 /* initialize sleep state adjust variables */
2025 hif_state->sleep_timer_init = true;
2026 hif_state->keep_awake_count = 0;
2027 hif_state->fake_sleep = false;
2028 hif_state->sleep_ticks = 0;
2029
2030 qdf_timer_init(NULL, &hif_state->sleep_timer,
2031 hif_sleep_entry, (void *)hif_state,
2032 QDF_TIMER_TYPE_WAKE_APPS);
2033 hif_state->sleep_timer_init = true;
2034
Houston Hoffmana15d0b02016-11-23 15:10:15 -08002035 status = hif_wlan_enable(hif_sc);
2036 if (status) {
2037 HIF_ERROR("%s: hif_wlan_enable error = %d",
2038 __func__, status);
2039 goto timer_free;
Houston Hoffman108da402016-03-14 21:11:24 -07002040 }
2041
2042 A_TARGET_ACCESS_LIKELY(hif_sc);
Houston Hoffmanf7718622016-03-14 21:11:37 -07002043
2044 if (CONFIG_ATH_PCIE_MAX_PERF ||
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002045 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
Venkateswara Swamy Bandarue20c6dc2016-09-20 20:25:20 +05302046 /*
2047 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
2048 * prevent sleep when we want to keep firmware always awake
2049 * note: when we want to keep firmware always awake,
2050 * hif_target_sleep_state_adjust will point to a dummy
2051 * function, and hif_pci_target_sleep_state_adjust must
2052 * be called instead.
2053 * note: bus type check is here because AHB bus is reusing
2054 * hif_pci_bus_configure code.
2055 */
2056 if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002057 if (hif_pci_target_sleep_state_adjust(hif_sc,
2058 false, true) < 0) {
2059 status = -EACCES;
2060 goto disable_wlan;
2061 }
Houston Hoffmanf7718622016-03-14 21:11:37 -07002062 }
2063 }
2064
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002065 /* todo: consider replacing this with an srng field */
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002066 if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2067 (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302068 hif_sc->per_ce_irq = true;
2069 }
2070
Houston Hoffman108da402016-03-14 21:11:24 -07002071 status = hif_config_ce(hif_sc);
2072 if (status)
2073 goto disable_wlan;
2074
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302075 /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
2076 if (hif_needs_bmi(hif_osc)) {
2077 status = hif_set_hia(hif_sc);
2078 if (status)
2079 goto unconfig_ce;
Houston Hoffman108da402016-03-14 21:11:24 -07002080
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302081 HIF_INFO_MED("%s: hif_set_hia done", __func__);
Houston Hoffman108da402016-03-14 21:11:24 -07002082
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302083 hif_register_bmi_callbacks(hif_sc);
2084 }
Houston Hoffman108da402016-03-14 21:11:24 -07002085
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002086 if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2087 (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2088 HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2089 __func__);
2090 else {
2091 status = hif_configure_irq(hif_sc);
2092 if (status < 0)
2093 goto unconfig_ce;
2094 }
Houston Hoffman108da402016-03-14 21:11:24 -07002095
2096 A_TARGET_ACCESS_UNLIKELY(hif_sc);
2097
2098 return status;
2099
2100unconfig_ce:
2101 hif_unconfig_ce(hif_sc);
2102disable_wlan:
2103 A_TARGET_ACCESS_UNLIKELY(hif_sc);
Houston Hoffmana15d0b02016-11-23 15:10:15 -08002104 hif_wlan_disable(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002105
Houston Hoffman63777f22016-03-14 21:11:49 -07002106timer_free:
2107 qdf_timer_stop(&hif_state->sleep_timer);
2108 qdf_timer_free(&hif_state->sleep_timer);
2109 hif_state->sleep_timer_init = false;
2110
Houston Hoffman108da402016-03-14 21:11:24 -07002111 HIF_ERROR("%s: failed, status = %d", __func__, status);
2112 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002113}
2114
2115/**
2116 * hif_bus_close(): hif_bus_close
2117 *
2118 * Return: n/a
2119 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07002120void hif_pci_close(struct hif_softc *hif_sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002121{
Houston Hoffman108da402016-03-14 21:11:24 -07002122 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002123 hif_pm_runtime_close(hif_pci_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002124 hif_ce_close(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002125}
2126
2127#define BAR_NUM 0
2128
Houston Hoffmand0620a32016-11-09 20:44:56 -08002129#ifndef CONFIG_PLD_PCIE_INIT
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002130static int hif_enable_pci(struct hif_pci_softc *sc,
2131 struct pci_dev *pdev,
2132 const struct pci_device_id *id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002133{
2134 void __iomem *mem;
2135 int ret = 0;
2136 uint16_t device_id;
Komal Seelam644263d2016-02-22 20:45:49 +05302137 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002138
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302139 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2140 if (device_id != id->device) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002141 HIF_ERROR(
2142 "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2143 __func__, device_id, id->device);
2144 /* pci link is down, so returing with error code */
2145 return -EIO;
2146 }
2147
2148 /* FIXME: temp. commenting out assign_resource
2149 * call for dev_attach to work on 2.6.38 kernel
2150 */
Amar Singhal901e33f2015-10-08 11:55:32 -07002151#if (!defined(__LINUX_ARM_ARCH__))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152 if (pci_assign_resource(pdev, BAR_NUM)) {
2153 HIF_ERROR("%s: pci_assign_resource error", __func__);
2154 return -EIO;
2155 }
2156#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002157 if (pci_enable_device(pdev)) {
2158 HIF_ERROR("%s: pci_enable_device error",
2159 __func__);
2160 return -EIO;
2161 }
2162
2163 /* Request MMIO resources */
2164 ret = pci_request_region(pdev, BAR_NUM, "ath");
2165 if (ret) {
2166 HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2167 ret = -EIO;
2168 goto err_region;
2169 }
Houston Hoffmand0620a32016-11-09 20:44:56 -08002170
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002171#ifdef CONFIG_ARM_LPAE
2172 /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
2173 * for 32 bits device also. */
2174 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2175 if (ret) {
2176 HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2177 goto err_dma;
2178 }
2179 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2180 if (ret) {
2181 HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2182 goto err_dma;
2183 }
2184#else
2185 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2186 if (ret) {
2187 HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2188 goto err_dma;
2189 }
2190 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2191 if (ret) {
2192 HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2193 __func__);
2194 goto err_dma;
2195 }
2196#endif
2197
2198 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2199
2200 /* Set bus master bit in PCI_COMMAND to enable DMA */
2201 pci_set_master(pdev);
2202
2203 /* Arrange for access to Target SoC registers. */
Houston Hoffmanf7bc3082016-10-17 19:52:55 -07002204#ifdef QCA_WIFI_NAPIER_EMULATION
2205 mem = napier_emu_ioremap(pdev, BAR_NUM, 0);
2206#else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002207 mem = pci_iomap(pdev, BAR_NUM, 0);
Houston Hoffmanf7bc3082016-10-17 19:52:55 -07002208#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002209 if (!mem) {
2210 HIF_ERROR("%s: PCI iomap error", __func__);
2211 ret = -EIO;
2212 goto err_iomap;
2213 }
Houston Hoffmanf7bc3082016-10-17 19:52:55 -07002214
2215 pr_err("*****BAR is %p\n", mem);
2216
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002217 sc->mem = mem;
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002218
2219 HIF_INFO("%s, mem after pci_iomap:%p\n",
2220 __func__, sc->mem);
Karunakar Dasineni8a8afe22016-10-18 13:10:13 -07002221
2222 /* Hawkeye emulation specific change */
2223 if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2224 (device_id == RUMIM2M_DEVICE_ID_NODE1)) {
2225 mem = mem + 0x0c000000;
2226 sc->mem = mem;
2227 HIF_INFO("%s: Changing PCI mem base to %p\n",
2228 __func__, sc->mem);
2229 }
2230
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002231 ol_sc->mem = mem;
Houston Hoffmand0620a32016-11-09 20:44:56 -08002232 ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002233 sc->pci_enabled = true;
2234 return ret;
2235
2236err_iomap:
2237 pci_clear_master(pdev);
2238err_dma:
2239 pci_release_region(pdev, BAR_NUM);
2240err_region:
2241 pci_disable_device(pdev);
2242 return ret;
2243}
Houston Hoffmand0620a32016-11-09 20:44:56 -08002244#else
2245int hif_enable_pci(struct hif_pci_softc *sc,
2246 struct pci_dev *pdev,
2247 const struct pci_device_id *id)
2248{
2249 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2250 sc->pci_enabled = true;
2251 return 0;
2252}
2253#endif
2254
2255
2256#ifndef CONFIG_PLD_PCIE_INIT
/* Unwind what hif_enable_pci() acquired: unmap BAR 0, drop bus
 * mastering, release the MMIO region and disable the device.
 */
static inline void hif_pci_deinit(struct hif_pci_softc *sc)
{
	pci_iounmap(sc->pdev, sc->mem);
	pci_clear_master(sc->pdev);
	pci_release_region(sc->pdev, BAR_NUM);
	pci_disable_device(sc->pdev);
}
2264#else
/* PLD owns pci teardown when CONFIG_PLD_PCIE_INIT is set: no-op */
static inline void hif_pci_deinit(struct hif_pci_softc *sc) {}
2266#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002267
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002268static void hif_disable_pci(struct hif_pci_softc *sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002269{
Komal Seelam644263d2016-02-22 20:45:49 +05302270 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2271
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002272 if (ol_sc == NULL) {
2273 HIF_ERROR("%s: ol_sc = NULL", __func__);
2274 return;
2275 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002276 hif_pci_device_reset(sc);
Houston Hoffmand0620a32016-11-09 20:44:56 -08002277
2278 hif_pci_deinit(sc);
2279
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002280 sc->mem = NULL;
2281 ol_sc->mem = NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002282}
2283
Houston Hoffman6a5fff62016-12-01 15:11:06 -08002284#ifndef QCA_WIFI_NAPIER_EMULATION
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002285static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002286{
2287 int ret = 0;
2288 int targ_awake_limit = 500;
2289#ifndef QCA_WIFI_3_0
2290 uint32_t fw_indicator;
2291#endif
Komal Seelam644263d2016-02-22 20:45:49 +05302292 struct hif_softc *scn = HIF_GET_SOFTC(sc);
2293
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002294 /*
2295 * Verify that the Target was started cleanly.*
2296 * The case where this is most likely is with an AUX-powered
2297 * Target and a Host in WoW mode. If the Host crashes,
2298 * loses power, or is restarted (without unloading the driver)
2299 * then the Target is left (aux) powered and running. On a
2300 * subsequent driver load, the Target is in an unexpected state.
2301 * We try to catch that here in order to reset the Target and
2302 * retry the probe.
2303 */
2304 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2305 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2306 while (!hif_targ_is_awake(scn, sc->mem)) {
2307 if (0 == targ_awake_limit) {
2308 HIF_ERROR("%s: target awake timeout", __func__);
2309 ret = -EAGAIN;
2310 goto end;
2311 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302312 qdf_mdelay(1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002313 targ_awake_limit--;
2314 }
2315
2316#if PCIE_BAR0_READY_CHECKING
2317 {
2318 int wait_limit = 200;
2319 /* Synchronization point: wait the BAR0 is configured */
2320 while (wait_limit-- &&
2321 !(hif_read32_mb(sc->mem +
2322 PCIE_LOCAL_BASE_ADDRESS +
2323 PCIE_SOC_RDY_STATUS_ADDRESS) \
2324 & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302325 qdf_mdelay(10);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002326 }
2327 if (wait_limit < 0) {
2328 /* AR6320v1 doesn't support checking of BAR0 configuration,
2329 takes one sec to wait BAR0 ready */
2330 HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2331 __func__);
2332 }
2333 }
2334#endif
2335
2336#ifndef QCA_WIFI_3_0
2337 fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
2338 hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2339 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2340
2341 if (fw_indicator & FW_IND_INITIALIZED) {
2342 HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2343 __func__);
2344 ret = -EAGAIN;
2345 goto end;
2346 }
2347#endif
2348
2349end:
2350 return ret;
2351}
Houston Hoffman6a5fff62016-12-01 15:11:06 -08002352#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002353
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002354static void wlan_tasklet_msi(unsigned long data)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002355{
2356 struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2357 struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
Komal Seelam644263d2016-02-22 20:45:49 +05302358 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002359
Komal Seelam02cf2f82016-02-22 20:44:25 +05302360 if (scn->hif_init_done == false)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002361 goto irq_handled;
2362
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302363 if (qdf_atomic_read(&scn->link_suspended))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002364 goto irq_handled;
2365
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302366 qdf_atomic_inc(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002367
2368 if (entry->id == HIF_MAX_TASKLET_NUM) {
2369 /* the last tasklet is for fw IRQ */
Komal Seelam02cf2f82016-02-22 20:44:25 +05302370 (irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
Komal Seelam6ee55902016-04-11 17:11:07 +05302371 if (scn->target_status == TARGET_STATUS_RESET)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002372 goto irq_handled;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302373 } else if (entry->id < scn->ce_count) {
2374 ce_per_engine_service(scn, entry->id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002375 } else {
2376 HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2377 __func__, entry->id);
2378 }
2379 return;
2380
2381irq_handled:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302382 qdf_atomic_dec(&scn->active_tasklet_cnt);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002383
2384}
2385
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002386static int hif_configure_msi(struct hif_pci_softc *sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002387{
2388 int ret = 0;
2389 int num_msi_desired;
2390 int rv = -1;
Komal Seelam644263d2016-02-22 20:45:49 +05302391 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002392
2393 HIF_TRACE("%s: E", __func__);
2394
2395 num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
2396 if (num_msi_desired < 1) {
2397 HIF_ERROR("%s: MSI is not configured", __func__);
2398 return -EINVAL;
2399 }
2400
2401 if (num_msi_desired > 1) {
2402#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
2403 rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
2404 num_msi_desired);
2405#else
2406 rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
2407#endif
2408 }
2409 HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
2410 __func__, num_msi_desired, rv);
2411
2412 if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
2413 int i;
2414
2415 sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302416 sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002417 (void *)sc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302418 sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002419 HIF_MAX_TASKLET_NUM;
2420 tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2421 (unsigned long)&sc->tasklet_entries[
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302422 HIF_MAX_TASKLET_NUM-1]);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002423 ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
2424 hif_pci_msi_fw_handler,
2425 IRQF_SHARED, "wlan_pci", sc);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302426 if (ret) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002427 HIF_ERROR("%s: request_irq failed", __func__);
2428 goto err_intr;
2429 }
2430 for (i = 0; i <= scn->ce_count; i++) {
2431 sc->tasklet_entries[i].hif_handler = (void *)sc;
2432 sc->tasklet_entries[i].id = i;
2433 tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2434 (unsigned long)&sc->tasklet_entries[i]);
2435 ret = request_irq((sc->pdev->irq +
2436 i + MSI_ASSIGN_CE_INITIAL),
2437 ce_per_engine_handler, IRQF_SHARED,
2438 "wlan_pci", sc);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302439 if (ret) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002440 HIF_ERROR("%s: request_irq failed", __func__);
2441 goto err_intr;
2442 }
2443 }
2444 } else if (rv > 0) {
2445 HIF_TRACE("%s: use single msi", __func__);
2446
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302447 ret = pci_enable_msi(sc->pdev);
2448 if (ret < 0) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002449 HIF_ERROR("%s: single MSI allocation failed",
2450 __func__);
2451 /* Try for legacy PCI line interrupts */
2452 sc->num_msi_intrs = 0;
2453 } else {
2454 sc->num_msi_intrs = 1;
2455 tasklet_init(&sc->intr_tq,
2456 wlan_tasklet, (unsigned long)sc);
2457 ret = request_irq(sc->pdev->irq,
2458 hif_pci_interrupt_handler,
2459 IRQF_SHARED, "wlan_pci", sc);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302460 if (ret) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002461 HIF_ERROR("%s: request_irq failed", __func__);
2462 goto err_intr;
2463 }
2464 }
2465 } else {
2466 sc->num_msi_intrs = 0;
2467 ret = -EIO;
2468 HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
2469 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302470 ret = pci_enable_msi(sc->pdev);
2471 if (ret < 0) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002472 HIF_ERROR("%s: single MSI interrupt allocation failed",
2473 __func__);
2474 /* Try for legacy PCI line interrupts */
2475 sc->num_msi_intrs = 0;
2476 } else {
2477 sc->num_msi_intrs = 1;
2478 tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2479 ret = request_irq(sc->pdev->irq,
2480 hif_pci_interrupt_handler, IRQF_SHARED,
2481 "wlan_pci", sc);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302482 if (ret) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002483 HIF_ERROR("%s: request_irq failed", __func__);
2484 goto err_intr;
2485 }
2486 }
2487
2488 if (ret == 0) {
2489 hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2490 PCIE_INTR_ENABLE_ADDRESS),
2491 HOST_GROUP0_MASK);
2492 hif_write32_mb(sc->mem +
2493 PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
2494 PCIE_SOC_WAKE_RESET);
2495 }
2496 HIF_TRACE("%s: X, ret = %d", __func__, ret);
2497
2498 return ret;
2499
2500err_intr:
2501if (sc->num_msi_intrs >= 1)
2502 pci_disable_msi(sc->pdev);
2503 return ret;
2504}
2505
/**
 * hif_pci_configure_legacy_irq() - set up legacy (line) interrupts
 * @sc: pci hif context
 *
 * Registers the shared legacy interrupt handler, unmasks the host
 * group0 interrupts, and clears (or, for the listed AR/QCA targets,
 * asserts) the SoC wake request.
 *
 * Return: 0 on success, request_irq error code otherwise.
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	uint32_t target_type = scn->target_info.target_type;

	HIF_TRACE("%s: E", __func__);

	/* does not support MSI or MSI IRQ setup failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	/* Use sc->irq instead of sc->pdev-irq
	 * platform_device pdev doesn't have an irq field */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
		  PCIE_INTR_ENABLE_ADDRESS),
		  HOST_GROUP0_MASK);
	/* flush the write before releasing the wake request */
	hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	/* these targets are kept awake after irq setup */
	if ((target_type == TARGET_TYPE_IPQ4019) ||
			(target_type == TARGET_TYPE_AR900B)  ||
			(target_type == TARGET_TYPE_QCA9984) ||
			(target_type == TARGET_TYPE_AR9888) ||
			(target_type == TARGET_TYPE_QCA9888)) {
		hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	}
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
			  "%s: X, ret = %d", __func__, ret);
	return ret;
}
2548
Houston Hoffman15010772016-09-16 14:01:13 -07002549static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2550{
2551 int ret;
2552 int ce_id, irq;
2553 uint32_t msi_data_start;
2554 uint32_t msi_data_count;
2555 uint32_t msi_irq_start;
2556 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2557
2558 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2559 &msi_data_count, &msi_data_start,
2560 &msi_irq_start);
2561 if (ret)
2562 return ret;
2563
2564 /* needs to match the ce_id -> irq data mapping
2565 * used in the srng parameter configuration
2566 */
2567 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2568 unsigned int msi_data;
2569 msi_data = (ce_id % msi_data_count) + msi_irq_start;
2570 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2571
2572 HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2573 ce_id, msi_data, irq);
2574
2575 free_irq(irq, &ce_sc->tasklets[ce_id]);
2576 }
2577
2578 return ret;
2579}
2580
/**
 * hif_pci_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * Tears down interrupt delivery: unregisters the copy-engine irqs, then
 * releases either the MSI vectors or the shared legacy line depending on
 * how interrupts were configured at attach time.
 *
 * Return: none
 */
void hif_pci_nointrs(struct hif_softc *scn)
{
	int i, ret;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free if request_irq() was never done */
	if (scn->request_irq_done == false)
		return;

	ret = hif_ce_srng_msi_free_irq(scn);
	if (ret != 0 && sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			free_irq(sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		/* Legacy PCI line interrupt.
		 * Use sc->irq instead of sc->pdev->irq because a
		 * platform_device pdev doesn't have an irq field.
		 * NOTE(review): this branch is also taken when
		 * hif_ce_srng_msi_free_irq() succeeded (ret == 0), which then
		 * frees sc->irq as well — confirm that is intended
		 */
		free_irq(sc->irq, sc);
	}
	scn->request_irq_done = false;

}
2617
/**
 * hif_pci_disable_bus() - disable the pci bus
 * @scn: hif context (cast from the bus dev by the caller)
 *
 * Masks and clears pending interrupts (Adrastea parts), resets the target
 * device (warm reset where the workaround applies), then releases the MSI,
 * diag-procfs and pci mapping resources acquired at enable time.
 *
 * Return: none
 */
void hif_pci_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct pci_dev *pdev;
	void __iomem *mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	pdev = sc->pdev;
	if (ADRASTEA_BU) {
		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

		/* mask then clear group0 so no interrupt fires during reset */
		hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
		hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
			       HOST_GROUP0_MASK);
	}

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if ((tgt_info->target_version == AR9888_REV2_VERSION) || (tgt_info->target_version == AR9887_REV1_VERSION))
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
	mem = (void __iomem *)sc->mem;
	if (mem) {
#ifndef CONFIG_PLD_PCIE_INIT
		/* when PLD owns pci init, it also owns MSI teardown */
		pci_disable_msi(pdev);
#endif
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		hif_pci_deinit(sc);
		scn->mem = NULL;
	}
	HIF_INFO("%s: X", __func__);
}
2676
2677#define OL_ATH_PCI_PM_CONTROL 0x44
2678
#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from
 * occurring
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * This api should only be called as part of bus prevent linkdown; it takes
 * (or releases) the pci layer's prevent_linkdown runtime-pm lock.
 */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (flag)
		hif_pm_runtime_prevent_suspend(hif_hdl,
					sc->prevent_linkdown_lock);
	else
		hif_pm_runtime_allow_suspend(hif_hdl,
					sc->prevent_linkdown_lock);
}
#else
/* no-op stub when runtime PM support is compiled out */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
2704
#if defined(CONFIG_PCI_MSM)
/**
 * hif_pci_prevent_linkdown() - allow or prevent linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link, and updates the runtime-pm prevent-linkdown vote.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	/* logged at ERROR level so the vote is always visible in logs */
	HIF_ERROR("wlan: %s pcie power collapse",
			(flag ? "disable" : "enable"));
	hif_runtime_prevent_linkdown(scn, flag);
	pld_wlan_pm_control(scn->qdf_dev->dev, flag);
}
#else
/* without CONFIG_PCI_MSM there is no platform vote; only runtime pm */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	HIF_ERROR("wlan: %s pcie power collapse",
			(flag ? "disable" : "enable"));
	hif_runtime_prevent_linkdown(scn, flag);
}
#endif
2730
2731/**
Dustin Brown782a07e2016-12-07 14:14:24 -08002732 * hif_pci_bus_enable_wake_irq() - enable pci bus wake irq
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002733 *
2734 * Configures the pci irq line as a wakeup source.
2735 *
2736 * Return: 0 for success and non-zero for failure
2737 */
Dustin Brown782a07e2016-12-07 14:14:24 -08002738static int hif_pci_bus_enable_wake_irq(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002739{
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302740 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002741
Dustin Brown782a07e2016-12-07 14:14:24 -08002742 if (!sc) {
2743 HIF_ERROR("%s: sc is null", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002744 return -EFAULT;
Dustin Brown782a07e2016-12-07 14:14:24 -08002745 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002746
Dustin Brown782a07e2016-12-07 14:14:24 -08002747 if (!sc->pdev) {
2748 HIF_ERROR("%s: pdev is null", __func__);
2749 return -EFAULT;
2750 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002751
Dustin Brown782a07e2016-12-07 14:14:24 -08002752 if (unlikely(enable_irq_wake(sc->pdev->irq))) {
2753 HIF_ERROR("%s: Failed to enable wake IRQ", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002754 return -EINVAL;
2755 }
2756
2757 return 0;
2758}
2759
/**
 * hif_pci_bus_suspend() - prepare hif for suspend
 * @scn: hif context
 *
 * Enables the pci bus wake irq when the link will stay up across
 * suspend; otherwise nothing needs to be armed.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (!hif_can_suspend_link(hif_hdl)) {
		/* pci link is staying up; enable wake irq */
		return hif_pci_bus_enable_wake_irq(scn);
	}

	return 0;
}
2775
/**
 * __hif_check_link_status() - API to check if PCIe link is active/not
 * @scn: HIF Context
 *
 * API reads the PCIe config space to verify if PCIe link training is
 * successful or not.  When the device id read back no longer matches the
 * cached devid, the link is treated as down and recovery is kicked off.
 *
 * Return: 0 when the link is up, -EINVAL on a bad context, -EACCES when
 * the config space read does not match
 */
static int __hif_check_link_status(struct hif_softc *scn)
{
	uint16_t dev_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
		return -EINVAL;
	}

	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);

	if (dev_id == sc->devid)
		return 0;

	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
		  __func__, dev_id);

	/* config space no longer matches: flag recovery for upper layers */
	scn->recovery = true;

	if (cbk && cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);
	else
		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);

	/* notify the platform driver that the link went down */
	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}
2814
2815/**
Dustin Brown782a07e2016-12-07 14:14:24 -08002816 * hif_pci_bus_disable_wake_irq() - disable pci bus wake irq
Houston Hoffman1688fba2015-11-10 16:47:27 -08002817 *
Dustin Brown782a07e2016-12-07 14:14:24 -08002818 * Deconfigures the pci irq line as a wakeup source.
2819 *
2820 * Return: 0 for success and non-zero for failure
2821 */
2822static int hif_pci_bus_disable_wake_irq(struct hif_softc *scn)
2823{
2824 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2825
2826 if (!sc) {
2827 HIF_ERROR("%s: sc is null", __func__);
2828 return -EFAULT;
2829 }
2830
2831 if (!sc->pdev) {
2832 HIF_ERROR("%s: pdev is null", __func__);
2833 return -EFAULT;
2834 }
2835
2836 if (unlikely(disable_irq_wake(sc->pdev->irq))) {
2837 HIF_ERROR("%s: Failed to disable wake IRQ", __func__);
2838 return -EFAULT;
2839 }
2840
2841 return 0;
2842}
2843
/**
 * hif_pci_bus_resume() - prepare hif for resume
 * @scn: hif context
 *
 * Verifies the pci link is still alive, then disables the bus wake irq
 * when the link was kept up across suspend.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	int status = __hif_check_link_status(scn);

	if (status != 0)
		return status;

	if (!hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) {
		/* pci link stayed up; disarm the wake irq */
		return hif_pci_bus_disable_wake_irq(scn);
	}

	return 0;
}
2865
2866/**
2867 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2868 * @scn: hif context
2869 *
2870 * Ensure that if we recieved the wakeup message before the irq
2871 * was disabled that the message is pocessed before suspending.
2872 *
2873 * Return: -EBUSY if we fail to flush the tasklets.
2874 */
2875int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2876{
2877 if (hif_drain_tasklets(scn) != 0)
2878 return -EBUSY;
2879
2880 /* Stop the HIF Sleep Timer */
2881 hif_cancel_deferred_target_sleep(scn);
2882
2883 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2884 qdf_atomic_set(&scn->link_suspended, 1);
2885
2886 return 0;
2887}
2888
2889/**
2890 * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
2891 * @scn: hif context
2892 *
2893 * Ensure that if we recieved the wakeup message before the irq
2894 * was disabled that the message is pocessed before suspending.
2895 *
2896 * Return: -EBUSY if we fail to flush the tasklets.
2897 */
2898int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2899{
2900 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2901 qdf_atomic_set(&scn->link_suspended, 0);
2902
2903 return 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002904}
2905
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002906#ifdef FEATURE_RUNTIME_PM
2907/**
2908 * __hif_runtime_pm_set_state(): utility function
2909 * @state: state to set
2910 *
2911 * indexes into the runtime pm state and sets it.
2912 */
Komal Seelam644263d2016-02-22 20:45:49 +05302913static void __hif_runtime_pm_set_state(struct hif_softc *scn,
Komal Seelamf8600682016-02-02 18:17:13 +05302914 enum hif_pm_runtime_state state)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002915{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002916 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002917
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002918 if (NULL == sc) {
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002919 HIF_ERROR("%s: HIF_CTX not initialized",
2920 __func__);
2921 return;
2922 }
2923
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302924 qdf_atomic_set(&sc->pm_state, state);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002925}
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002926
/**
 * hif_runtime_pm_set_state_inprogress() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm operation has started.
 */
static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
}
2936
/**
 * hif_runtime_pm_set_state_on() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on.
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
2946
/**
 * hif_runtime_pm_set_state_suspended() - adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed
 * successfully.
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
2956
Houston Hoffman692cc052015-11-10 18:42:47 -08002957/**
2958 * hif_log_runtime_suspend_success() - log a successful runtime suspend
2959 */
Komal Seelam644263d2016-02-22 20:45:49 +05302960static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002961{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002962 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002963 if (sc == NULL)
2964 return;
2965
2966 sc->pm_stats.suspended++;
2967 sc->pm_stats.suspend_jiffies = jiffies;
2968}
2969
2970/**
2971 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2972 *
2973 * log a failed runtime suspend
2974 * mark last busy to prevent immediate runtime suspend
2975 */
Komal Seelamf8600682016-02-02 18:17:13 +05302976static void hif_log_runtime_suspend_failure(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002977{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002978 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002979 if (sc == NULL)
2980 return;
2981
2982 sc->pm_stats.suspend_err++;
Houston Hoffman692cc052015-11-10 18:42:47 -08002983}
2984
2985/**
2986 * hif_log_runtime_resume_success() - log a successful runtime resume
2987 *
2988 * log a successfull runtime resume
2989 * mark last busy to prevent immediate runtime suspend
2990 */
Komal Seelamf8600682016-02-02 18:17:13 +05302991static void hif_log_runtime_resume_success(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002992{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002993 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman692cc052015-11-10 18:42:47 -08002994 if (sc == NULL)
2995 return;
2996
2997 sc->pm_stats.resumed++;
Houston Hoffman78467a82016-01-05 20:08:56 -08002998}
2999
3000/**
3001 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
3002 *
3003 * Record the failure.
3004 * mark last busy to delay a retry.
3005 * adjust the runtime_pm state.
3006 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303007void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08003008{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003009 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3010 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08003011
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003012 hif_log_runtime_suspend_failure(hif_ctx);
3013 if (hif_pci_sc != NULL)
3014 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3015 hif_runtime_pm_set_state_on(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08003016}
3017
3018/**
3019 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
3020 *
3021 * Makes sure that the pci link will be taken down by the suspend opperation.
3022 * If the hif layer is configured to leave the bus on, runtime suspend will
3023 * not save any power.
3024 *
3025 * Set the runtime suspend state to in progress.
3026 *
3027 * return -EINVAL if the bus won't go down. otherwise return 0
3028 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303029int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08003030{
Komal Seelam644263d2016-02-22 20:45:49 +05303031 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3032
Komal Seelamf8600682016-02-02 18:17:13 +05303033 if (!hif_can_suspend_link(hif_ctx)) {
Houston Hoffman78467a82016-01-05 20:08:56 -08003034 HIF_ERROR("Runtime PM not supported for link up suspend");
3035 return -EINVAL;
3036 }
3037
Komal Seelam644263d2016-02-22 20:45:49 +05303038 hif_runtime_pm_set_state_inprogress(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08003039 return 0;
3040}
3041
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: opaque hif context
 *
 * Moves the runtime-pm state machine to SUSPENDED, then records the
 * success in the pm statistics.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(hif_sc);
	hif_log_runtime_suspend_success(hif_sc);
}
3055
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: opaque hif context
 *
 * Moves the runtime-pm state machine to INPROGRESS.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	hif_runtime_pm_set_state_inprogress(HIF_GET_SOFTC(hif_ctx));
}
3067
3068/**
3069 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3070 *
3071 * record the success.
3072 * adjust the runtime_pm state
3073 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303074void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08003075{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003076 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3077 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08003078
Houston Hoffmanb21a0532016-03-14 21:12:12 -07003079 hif_log_runtime_resume_success(hif_ctx);
3080 if (hif_pci_sc != NULL)
3081 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3082 hif_runtime_pm_set_state_on(scn);
Houston Hoffman692cc052015-11-10 18:42:47 -08003083}
Houston Hoffman692cc052015-11-10 18:42:47 -08003084
Houston Hoffman1688fba2015-11-10 16:47:27 -08003085/**
3086 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3087 *
3088 * Return: 0 for success and non-zero error code for failure
3089 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303090int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman1688fba2015-11-10 16:47:27 -08003091{
Dustin Brown782a07e2016-12-07 14:14:24 -08003092 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3093 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn)
3094 int err;
3095
3096 err = hif_pci_bus_suspend(scn);
3097 if (err)
3098 goto exit_with_error;
3099
3100 /*
3101 * normal 3-stage suspend from CNSS disables irqs before calling
3102 * noirq stage
3103 */
3104 disable_irq(sc->pdev->irq);
3105
3106 err = hif_pci_bus_suspend_noirq(scn);
3107 if (err)
3108 goto bus_resume;
3109
3110 return 0;
3111
3112bus_resume:
3113 enable_irq(sc->pdev->irq);
3114 err = hif_pci_bus_resume(scn);
3115 QDF_BUG(err == 0);
3116
3117exit_with_error:
3118 return err;
Houston Hoffman1688fba2015-11-10 16:47:27 -08003119}
3120
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: opaque hif context
 *
 * Ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 */
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		/* wake the target before touching its registers; bail
		 * if it cannot be accessed
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/* war_ce_src_ring_write_idx_set: push the cached src ring
		 * write index to hardware under the index lock
		 */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
				ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
#else
/* no-op when fastpath support is compiled out */
static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
#endif
3154
3155
Houston Hoffman1688fba2015-11-10 16:47:27 -08003156/**
3157 * hif_runtime_resume() - do the bus resume part of a runtime resume
3158 *
3159 * Return: 0 for success and non-zero error code for failure
3160 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303161int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
Houston Hoffman1688fba2015-11-10 16:47:27 -08003162{
Dustin Brown782a07e2016-12-07 14:14:24 -08003163 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3164 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn)
3165 int err;
3166
3167 err = hif_pci_bus_resume_noirq(scn);
3168 if (err)
3169 goto exit_with_error;
3170
3171 /*
3172 * normal 3-stage resume from CNSS enables irqs after calling
3173 * noirq stage
3174 */
3175 enable_irq(sc->pdev->irq);
3176
3177 err = hif_pci_bus_resume(scn);
3178 if (err)
3179 goto bus_suspend_noirq;
Houston Hoffmanf4607852015-12-17 17:14:40 -08003180
Komal Seelamf8600682016-02-02 18:17:13 +05303181 hif_fastpath_resume(hif_ctx);
Houston Hoffmanf4607852015-12-17 17:14:40 -08003182
Dustin Brown782a07e2016-12-07 14:14:24 -08003183 return 0;
3184
3185bus_suspend_noirq:
3186 disable_irq(sc->pdev->irq);
3187 err = hif_pci_bus_suspend_noirq(scn);
3188 QDF_BUG(err == 0);
3189
3190exit_with_error:
3191 return err;
Houston Hoffman1688fba2015-11-10 16:47:27 -08003192}
Jeff Johnson6950fdb2016-10-07 13:00:59 -07003193#endif /* #ifdef FEATURE_RUNTIME_PM */
Houston Hoffman1688fba2015-11-10 16:47:27 -08003194
#if CONFIG_PCIE_64BIT_MSI
/**
 * hif_free_msi_ctx() - free the msi "magic" dma cookie
 * @scn: hif context
 *
 * Releases the 4-byte consistent dma allocation that backed the msi
 * magic value and clears the bookkeeping fields.
 *
 * NOTE(review): this reaches the pci context via scn->hif_sc while the
 * rest of this file uses HIF_GET_PCI_SOFTC(scn) — confirm both resolve
 * to the same object.
 */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
/* no-op when 64-bit MSI support is compiled out */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif
3212
/**
 * hif_pci_disable_isr() - disable interrupt delivery and kill tasklets
 * @scn: hif context
 *
 * Frees the registered irqs and msi context, then kills the CE, group
 * and legacy tasklets and zeroes the active-tasklet counters.
 */
void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	/* irqs must be released before the tasklets are killed so no new
	 * tasklet can be scheduled afterwards
	 */
	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	hif_grp_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
3226
/**
 * hif_pci_reset_soc() - reset the SoC
 * @hif_sc: hif context
 *
 * Resets the target device, using the CPU warm reset workaround for
 * AR9888_REV2 where it is enabled.
 */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}
3247
#ifdef CONFIG_PCI_MSM
/**
 * hif_msm_pcie_debug_info() - dump MSM PCIe debug info for this endpoint
 * @sc: pci bus specific hif context
 *
 * Asks the MSM PCIe root-complex driver to print its debug state for
 * this device (two invocations with different option arguments).
 */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
{
	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
}
#else
/* no-op stub; fixed: removed stray ';' after the body — an empty
 * declaration at file scope is an ISO C constraint violation (-Wpedantic)
 */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
#endif
3257
Houston Hoffman2d8ee282016-10-17 19:56:07 -07003258#ifndef QCA_WIFI_NAPIER_EMULATION
Komal Seelambd7c51d2016-02-24 10:27:30 +05303259/**
3260 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3261 * @sc: HIF PCIe Context
3262 *
3263 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3264 *
3265 * Return: Failure to caller
3266 */
3267static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3268{
3269 uint16_t val;
3270 uint32_t bar;
3271 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3272 struct hif_softc *scn = HIF_GET_SOFTC(sc);
3273 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3274 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
Komal Seelam75080122016-03-02 15:18:25 +05303275 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303276 A_target_id_t pci_addr = scn->mem;
3277
3278 HIF_ERROR("%s: keep_awake_count = %d",
3279 __func__, hif_state->keep_awake_count);
3280
3281 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3282
3283 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3284
3285 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3286
3287 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3288
3289 pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3290
3291 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3292
3293 pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3294
3295 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3296
3297 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3298
3299 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3300
3301 HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
3302 hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3303 PCIE_SOC_WAKE_ADDRESS));
3304
3305 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3306 hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3307 RTC_STATE_ADDRESS));
3308
3309 HIF_ERROR("%s:error, wakeup target", __func__);
3310 hif_msm_pcie_debug_info(sc);
3311
3312 if (!cfg->enable_self_recovery)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303313 QDF_BUG(0);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303314
3315 scn->recovery = true;
3316
3317 if (cbk->set_recovery_in_progress)
3318 cbk->set_recovery_in_progress(cbk->context, true);
3319
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003320 pld_is_pci_link_down(sc->dev);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303321 return -EACCES;
3322}
Houston Hoffman2d8ee282016-10-17 19:56:07 -07003323#endif
Komal Seelambd7c51d2016-02-24 10:27:30 +05303324
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003325/*
3326 * For now, we use simple on-demand sleep/wake.
3327 * Some possible improvements:
3328 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3329 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3330 * Careful, though, these functions may be used by
3331 * interrupt handlers ("atomic")
3332 * -Don't use host_reg_table for this code; instead use values directly
3333 * -Use a separate timer to track activity and allow Target to sleep only
3334 * if it hasn't done anything for a while; may even want to delay some
3335 * processing for a short while in order to "batch" (e.g.) transmit
3336 * requests with completion processing into "windows of up time". Costs
3337 * some performance, but improves power utilization.
3338 * -On some platforms, it might be possible to eliminate explicit
3339 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
3340 * recover from the failure by forcing the Target awake.
3341 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
3342 * overhead in some cases. Perhaps this makes more sense when
3343 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3344 * disabled.
3345 * -It is possible to compile this code out and simply force the Target
3346 * to remain awake. That would yield optimal performance at the cost of
3347 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3348 *
3349 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3350 */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: true to drop a keep-awake reference (allow target sleep),
 *	false to take one (force/keep target awake)
 * @wait_for_it: when waking, spin until the target is verified awake
 *
 * Maintains the keep_awake_count refcount under keep_awake_lock.  On the
 * last put it starts the fake-sleep inactivity timer; on the first get it
 * pokes PCIE_SOC_WAKE_ADDRESS to force the target awake and, if
 * @wait_for_it, busy-waits (up to PCIE_SLEEP_ADJUST_TIMEOUT us) for the
 * target to report awake, escalating to hif_log_soc_wakeup_timeout() on
 * timeout.
 *
 * Return: 0 on success, -EACCES if recovery is active or the link is down
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
			      bool sleep_ok, bool wait_for_it)
{
#ifndef QCA_WIFI_NAPIER_EMULATION
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;		/* worst-case wake latency seen (us) */
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;		/* set on invalid access to dump regs */
	if (scn->recovery)
		return -EACCES;

	/* register access while the link is suspended is invalid */
	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		/* a previous invalid access was seen; force the verified
		 * wake path so the interrupt registers below get dumped
		 */
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
				__func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			/* timer-based sleep pending; target never actually
			 * slept, so it is still awake
			 */
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(pci_addr +
					      PCIE_LOCAL_BASE_ADDRESS +
					      PCIE_SOC_WAKE_ADDRESS,
					      PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			/* poll with back-off: 5us steps growing to 50us */
			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				} else
				if (!hif_pci_targ_is_present
					(scn, pci_addr)) {
					/* device gone from the bus; stop
					 * polling (awake stays unverified)
					 */
					break;
				}

				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	/* debug dump once the target is verifiably awake again */
	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			__func__,
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS),
			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CLR_ADDRESS),
			hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

#endif
	return 0;
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003476
3477#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303478uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003479{
3480 uint32_t value;
3481 void *addr;
3482
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003483 addr = scn->mem + offset;
Houston Hoffman56e0d702016-05-05 17:48:06 -07003484 value = hif_read32_mb(addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003485
3486 {
3487 unsigned long irq_flags;
3488 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3489
3490 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3491 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3492 pcie_access_log[idx].is_write = false;
3493 pcie_access_log[idx].addr = addr;
3494 pcie_access_log[idx].value = value;
3495 pcie_access_log_seqnum++;
3496 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3497 }
3498
3499 return value;
3500}
3501
3502void
Komal Seelam644263d2016-02-22 20:45:49 +05303503hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003504{
3505 void *addr;
3506
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003507 addr = scn->mem + (offset);
3508 hif_write32_mb(addr, value);
3509
3510 {
3511 unsigned long irq_flags;
3512 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3513
3514 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3515 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3516 pcie_access_log[idx].is_write = true;
3517 pcie_access_log[idx].addr = addr;
3518 pcie_access_log[idx].value = value;
3519 pcie_access_log_seqnum++;
3520 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3521 }
3522}
3523
3524/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003525 * hif_target_dump_access_log() - dump access log
3526 *
3527 * dump access log
3528 *
3529 * Return: n/a
3530 */
3531void hif_target_dump_access_log(void)
3532{
3533 int idx, len, start_idx, cur_idx;
3534 unsigned long irq_flags;
3535
3536 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3537 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3538 len = PCIE_ACCESS_LOG_NUM;
3539 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3540 } else {
3541 len = pcie_access_log_seqnum;
3542 start_idx = 0;
3543 }
3544
3545 for (idx = 0; idx < len; idx++) {
3546 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3547 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%p val:%u.",
3548 __func__, idx,
3549 pcie_access_log[cur_idx].seqnum,
3550 pcie_access_log[cur_idx].is_write,
3551 pcie_access_log[cur_idx].addr,
3552 pcie_access_log[cur_idx].value);
3553 }
3554
3555 pcie_access_log_seqnum = 0;
3556 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3557}
3558#endif
3559
#ifndef HIF_AHB
/* AHB stubs: when the AHB bus layer is not compiled in these must never
 * be reached (only AHB target types select them in hif_configure_irq()),
 * so trip a bug and report failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}

int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif
3573
Houston Hoffman15010772016-09-16 14:01:13 -07003574irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3575{
3576 struct ce_tasklet_entry *tasklet_entry = context;
3577 return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3578}
3579extern const char *ce_name[];
3580
/**
 * hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * since MSI interrupts are not level based, the system can function
 * without disabling these interrupts. Interrupt mitigation can be
 * added here for better system performance.
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(hif_sc);

	disable_irq_nosync(pci_sc->ce_msi_irq_num[ce_id]);
}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003595
/**
 * hif_ce_srng_msi_irq_enable() - enable the per-CE msi irq
 * @hif_sc: hif context
 * @ce_id: which ce to re-enable copy complete interrupts for
 *
 * Counterpart of hif_ce_srng_msi_irq_disable().
 */
static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(hif_sc);

	enable_irq(pci_sc->ce_msi_irq_num[ce_id]);
}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003602
3603static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3604{}
3605
3606static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3607{}
3608
3609static int hif_ce_msi_configure_irq(struct hif_softc *scn)
Houston Hoffman15010772016-09-16 14:01:13 -07003610{
3611 int ret;
3612 int ce_id, irq;
3613 uint32_t msi_data_start;
3614 uint32_t msi_data_count;
3615 uint32_t msi_irq_start;
3616 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003617 struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffman15010772016-09-16 14:01:13 -07003618
3619 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3620 &msi_data_count, &msi_data_start,
3621 &msi_irq_start);
Houston Hoffman15010772016-09-16 14:01:13 -07003622 if (ret)
3623 return ret;
3624
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003625 if (ce_srng_based(scn)) {
3626 scn->bus_ops.hif_irq_disable =
3627 &hif_ce_srng_msi_irq_disable;
3628 scn->bus_ops.hif_irq_enable =
3629 &hif_ce_srng_msi_irq_enable;
3630 } else {
3631 scn->bus_ops.hif_irq_disable =
3632 &hif_ce_legacy_msi_irq_disable;
3633 scn->bus_ops.hif_irq_enable =
3634 &hif_ce_legacy_msi_irq_enable;
3635 }
3636
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003637 /* needs to match the ce_id -> irq data mapping
3638 * used in the srng parameter configuration
3639 */
3640 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3641 unsigned int msi_data = (ce_id % msi_data_count) +
3642 msi_irq_start;
3643 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003644
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003645 HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d tasklet %p)",
3646 __func__, ce_id, msi_data, irq,
3647 &ce_sc->tasklets[ce_id]);
3648
3649 pci_sc->ce_msi_irq_num[ce_id] = irq;
3650 ret = request_irq(irq, hif_ce_interrupt_handler,
3651 IRQF_SHARED,
3652 ce_name[ce_id],
3653 &ce_sc->tasklets[ce_id]);
3654 if (ret)
3655 goto free_irq;
3656 }
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003657
Houston Hoffman15010772016-09-16 14:01:13 -07003658 return ret;
3659
3660free_irq:
3661 /* the request_irq for the last ce_id failed so skip it. */
3662 while (ce_id > 0 && ce_id < scn->ce_count) {
3663 unsigned int msi_data;
3664
3665 ce_id--;
3666 msi_data = (ce_id % msi_data_count) + msi_data_start;
3667 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3668 free_irq(irq, &ce_sc->tasklets[ce_id]);
3669 }
3670 return ret;
3671}
3672
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003673
/**
 * hif_configure_irq() - configure interrupt
 * @scn: hif context
 *
 * This function configures interrupt(s), trying the mechanisms in order:
 * per-CE MSI, then shared MSI (if ENABLE_MSI), then a target-specific
 * legacy line interrupt (AHB for IPQ4019/QCA8074, PCI otherwise).
 *
 * Return: 0 - for success
 */
int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	HIF_TRACE("%s: E", __func__);

	hif_init_reschedule_tasklet_work(sc);

	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0) {
		goto end;
	}

	if (ENABLE_MSI) {
		ret = hif_configure_msi(sc);
		if (ret == 0)
			goto end;
	}
	/* MSI failed. Try legacy irq */
	switch (scn->target_info.target_type) {
	case TARGET_TYPE_IPQ4019:
		ret = hif_ahb_configure_legacy_irq(sc);
		break;
	case TARGET_TYPE_QCA8074:
		ret = hif_ahb_configure_irq(sc);
		break;
	default:
		ret = hif_pci_configure_legacy_irq(sc);
		break;
	}
	if (ret < 0) {
		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
			   __func__, ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}
3724
#ifndef QCA_WIFI_NAPIER_EMULATION
/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
static void hif_target_sync(struct hif_softc *scn)
{
	/* enable the firmware interrupt line so the target starts booting */
	hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS,
			PCIE_SOC_WAKE_V_MASK);
	/* busy-wait (no timeout) until the SOC reports awake */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;	/* 500 * 10ms = 5s budget */
		int fw_ind = 0;
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		while (1) {
			fw_ind = hif_read32_mb(scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			/* re-assert the fw interrupt each iteration in case
			 * the first write raced with target boot
			 */
			hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);

			qdf_mdelay(10);
		}
		if (wait_limit < 0)
			HIF_TRACE("%s: FW signal timed out",
					__func__);
		else
			HIF_TRACE("%s: Got FW signal, retries = %x",
					__func__, 500-wait_limit);
	}
	/* release the forced-awake vote taken above */
	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
#endif /* QCA_WIFI_NAPIER_EMULATION */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003777
#ifdef CONFIG_PLD_PCIE_INIT
/**
 * hif_pci_get_soc_info() - populate BAR addresses from the PLD layer
 * @sc: HIF PCIe context to fill in
 * @dev: device handle used to query pld
 *
 * When the platform driver (PLD) owns PCIe init, copy the virtual and
 * physical BAR addresses it reports into the HIF contexts.
 * NOTE(review): pld_get_soc_info() return value is ignored -- presumably
 * info stays zeroed on failure and the BAR0 check in hif_pci_enable_bus()
 * catches it; confirm.
 */
static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
{
	struct pld_soc_info info;

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
}
#else
/* without CONFIG_PLD_PCIE_INIT the BARs are mapped by hif_enable_pci() */
static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
{}
#endif
3792
/**
 * hif_pci_enable_bus(): enable bus
 *
 * This function enables the bus: it enables the PCI device (with retry),
 * identifies the hif/target type from device and revision ids, optionally
 * wakes and syncs with the target, and records the BAR info.
 *
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			  struct device *dev, void *bdev,
			  const hif_bus_id *bid,
			  enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
	/* with CONFIG_PLD_PCIE_INIT this fills sc->mem / mem_pa from pld */
	hif_pci_get_soc_info(sc, dev);
again:
	ret = hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
		       __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* 0x08 = PCI_REVISION_ID (reads class code into the upper byte) */
	pci_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
						&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	/* RUMI emulation nodes skip target wakeup entirely */
	if ((id->device == RUMIM2M_DEVICE_ID_NODE0) ||
			(id->device == RUMIM2M_DEVICE_ID_NODE1))
		HIF_TRACE("%s:Skip tgt_wake up for PCI based 8074\n", __func__);
	else {
#ifndef QCA_WIFI_NAPIER_EMULATION
		ret = hif_pci_probe_tgt_wakeup(sc);
#endif
		if (ret < 0) {
			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
					__func__, ret);
			/* -EAGAIN triggers the bounded reprobe loop below */
			if (ret == -EAGAIN)
				probe_again++;
			goto err_tgtstate;
		}
		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
	}

	tgt_info->target_type = target_type;

	if (!ol_sc->mem_pa) {
		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}

	if ((id->device != RUMIM2M_DEVICE_ID_NODE0) &&
			(id->device != RUMIM2M_DEVICE_ID_NODE1)) {
#ifndef QCA_WIFI_NAPIER_EMULATION
		hif_target_sync(ol_sc);
#endif

		if (ADRASTEA_BU)
			hif_vote_link_up(hif_hdl);
	}

	return 0;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* 10, 40, 90, 100, 100, ... */
		/* NOTE(review): max() clamps every delay to >= 100ms, so the
		 * actual sequence is 100, 100, ...; the comment above suggests
		 * min() may have been intended -- confirm before changing
		 */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return ret;
}
3918
/**
 * hif_pci_irq_enable() - ce_irq_enable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Clears this CE's bit in ce_irq_summary and, once no CE is pending,
 * re-enables the legacy PCI line interrupt group.  Also ends the target
 * access window opened by hif_pci_irq_disable().
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
			(scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			/* read back -- presumably to flush/confirm the
			 * posted enable write; confirm against HW docs
			 */
			hif_read32_mb(scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	/* balances Q_TARGET_ACCESS_BEGIN in hif_pci_irq_disable() */
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}
/**
 * hif_pci_irq_disable() - ce_irq_disable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled
	 * by hif_pci_irq_enable() (which calls Q_TARGET_ACCESS_END)
	 */
	Q_TARGET_ACCESS_BEGIN(scn);
}
3969
Houston Hoffman9078a152015-11-02 16:15:02 -08003970#ifdef FEATURE_RUNTIME_PM
Houston Hoffmanf4607852015-12-17 17:14:40 -08003971
/**
 * hif_pm_runtime_get_noresume() - take a runtime pm reference without resume
 * @hif_ctx: hif context
 *
 * Bumps the runtime usage count (blocking runtime suspend) but does not
 * wake the device; accounted in pm_stats.runtime_get.
 */
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (NULL == sc)
		return;

	sc->pm_stats.runtime_get++;
	pm_runtime_get_noresume(sc->dev);
}
3982
Houston Hoffman9078a152015-11-02 16:15:02 -08003983/**
3984 * hif_pm_runtime_get() - do a get opperation on the device
3985 *
3986 * A get opperation will prevent a runtime suspend untill a
3987 * corresponding put is done. This api should be used when sending
3988 * data.
3989 *
3990 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3991 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
3992 *
3993 * return: success if the bus is up and a get has been issued
3994 * otherwise an error code.
3995 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303996int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
Houston Hoffman9078a152015-11-02 16:15:02 -08003997{
Komal Seelam644263d2016-02-22 20:45:49 +05303998 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05303999 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004000 int ret;
4001 int pm_state;
4002
4003 if (NULL == scn) {
4004 HIF_ERROR("%s: Could not do runtime get, scn is null",
4005 __func__);
4006 return -EFAULT;
4007 }
Houston Hoffman9078a152015-11-02 16:15:02 -08004008
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304009 pm_state = qdf_atomic_read(&sc->pm_state);
Houston Hoffman9078a152015-11-02 16:15:02 -08004010
4011 if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
4012 pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4013 sc->pm_stats.runtime_get++;
4014 ret = __hif_pm_runtime_get(sc->dev);
4015
4016 /* Get can return 1 if the device is already active, just return
4017 * success in that case
4018 */
4019 if (ret > 0)
4020 ret = 0;
4021
4022 if (ret)
4023 hif_pm_runtime_put(hif_ctx);
4024
4025 if (ret && ret != -EINPROGRESS) {
4026 sc->pm_stats.runtime_get_err++;
4027 HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304028 __func__, qdf_atomic_read(&sc->pm_state), ret);
Houston Hoffman9078a152015-11-02 16:15:02 -08004029 }
4030
4031 return ret;
4032 }
4033
4034 sc->pm_stats.request_resume++;
4035 sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4036 ret = hif_pm_request_resume(sc->dev);
4037
4038 return -EAGAIN;
4039}
4040
4041/**
4042 * hif_pm_runtime_put() - do a put opperation on the device
4043 *
4044 * A put opperation will allow a runtime suspend after a corresponding
4045 * get was done. This api should be used when sending data.
4046 *
4047 * This api will return a failure if runtime pm is stopped
4048 * This api will return failure if it would decrement the usage count below 0.
4049 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304050 * return: QDF_STATUS_SUCCESS if the put is performed
Houston Hoffman9078a152015-11-02 16:15:02 -08004051 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05304052int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
Houston Hoffman9078a152015-11-02 16:15:02 -08004053{
Komal Seelam644263d2016-02-22 20:45:49 +05304054 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05304055 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004056 int pm_state, usage_count;
4057 unsigned long flags;
4058 char *error = NULL;
4059
4060 if (NULL == scn) {
4061 HIF_ERROR("%s: Could not do runtime put, scn is null",
4062 __func__);
4063 return -EFAULT;
4064 }
Houston Hoffman9078a152015-11-02 16:15:02 -08004065 usage_count = atomic_read(&sc->dev->power.usage_count);
4066
4067 if (usage_count == 1) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304068 pm_state = qdf_atomic_read(&sc->pm_state);
Houston Hoffman9078a152015-11-02 16:15:02 -08004069
4070 if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4071 error = "Ignoring unexpected put when runtime pm is disabled";
4072
4073 } else if (usage_count == 0) {
4074 error = "PUT Without a Get Operation";
4075 }
4076
4077 if (error) {
4078 spin_lock_irqsave(&sc->runtime_lock, flags);
4079 hif_pci_runtime_pm_warn(sc, error);
4080 spin_unlock_irqrestore(&sc->runtime_lock, flags);
4081 return -EINVAL;
4082 }
4083
4084 sc->pm_stats.runtime_put++;
4085
4086 hif_pm_runtime_mark_last_busy(sc->dev);
4087 hif_pm_runtime_put_auto(sc->dev);
4088
4089 return 0;
4090}
4091
4092
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Takes a runtime pm usage reference on behalf of @lock, marks the lock
 * active and queues it on hif_sc->prevent_suspend_list. A lock that is
 * already active is left untouched, so repeated prevent calls with the
 * same lock do not take multiple references.
 *
 * NOTE(review): every caller in this file holds hif_sc->runtime_lock
 * (the spinlock) around this call; it must not be called without it.
 *
 * Return: 0 if successful; -EINPROGRESS if a runtime resume/suspend
 * transition is in flight; other negative values on runtime pm errors.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(hif_sc->dev);

	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't be do runtime_put here as in later point allow
	 * suspend gets called with the the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
				"Prevent Suspend Runtime PM Error");
	}

	/* Even on a get error the lock is recorded as active; the matching
	 * allow-suspend call balances the usage count (see comment above).
	 */
	hif_sc->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);

	hif_sc->pm_stats.prevent_suspend++;

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
		ret);

	return ret;
}
4148
/**
 * __hif_pm_runtime_allow_suspend() - release a prevent-suspend vote
 * @hif_sc: pci context
 * @lock: runtime_pm lock whose vote is being dropped
 *
 * Removes @lock from the prevent_suspend_list, clears its active state
 * and timeout, and drops the runtime pm usage reference taken by
 * __hif_pm_runtime_prevent_suspend(). Does nothing if no prevent votes
 * are outstanding or if @lock is not currently active.
 *
 * NOTE(review): every caller in this file holds hif_sc->runtime_lock
 * (the spinlock) around this call; it must not be called without it.
 *
 * Return: 0 when nothing was held; -EINVAL on an unbalanced allow;
 * otherwise the result of hif_pm_runtime_put_auto().
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
		struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&hif_sc->dev->power.usage_count);

	/*
	 * During Driver unload, platform driver increments the usage
	 * count to prevent any runtime suspend getting called.
	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
	 * usage_count should be one. Ideally this shouldn't happen as
	 * context->active should be active for allow suspend to happen
	 * Handling this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
			&& usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
				"Allow without a prevent suspend");
		return -EINVAL;
	}

	list_del(&lock->list);

	hif_sc->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(hif_sc->dev);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
		ret);

	hif_sc->pm_stats.allow_suspend++;
	return ret;
}
4196
/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * If runtime locks are acquired with a timeout, this function releases
 * all timeout-armed locks once the shared timer expires, allowing
 * runtime suspend again.
 */
static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
{
	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
	unsigned long flags;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	timer_expires = hif_sc->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		/* 0 marks the shared timer as disarmed */
		hif_sc->runtime_timer_expires = 0;
		/* release every lock that was armed with a timeout */
		list_for_each_entry_safe(context, temp,
				&hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
}
4243
Komal Seelam5584a7c2016-02-24 19:22:48 +05304244int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
Houston Hoffman9078a152015-11-02 16:15:02 -08004245 struct hif_pm_runtime_lock *data)
4246{
Komal Seelam644263d2016-02-22 20:45:49 +05304247 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4248 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004249 struct hif_pm_runtime_lock *context = data;
4250 unsigned long flags;
4251
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004252 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08004253 return 0;
4254
4255 if (!context)
4256 return -EINVAL;
4257
4258 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
4259 context->timeout = 0;
4260 __hif_pm_runtime_prevent_suspend(hif_sc, context);
4261 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
4262
4263 return 0;
4264}
4265
Komal Seelam5584a7c2016-02-24 19:22:48 +05304266int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
Komal Seelam644263d2016-02-22 20:45:49 +05304267 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08004268{
Komal Seelam644263d2016-02-22 20:45:49 +05304269 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4270 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004271 struct hif_pm_runtime_lock *context = data;
4272
4273 unsigned long flags;
4274
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004275 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08004276 return 0;
4277
4278 if (!context)
4279 return -EINVAL;
4280
4281 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
4282
4283 __hif_pm_runtime_allow_suspend(hif_sc, context);
4284
4285 /* The list can be empty as well in cases where
4286 * we have one context in the list and the allow
4287 * suspend came before the timer expires and we delete
4288 * context above from the list.
4289 * When list is empty prevent_suspend count will be zero.
4290 */
4291 if (hif_sc->prevent_suspend_cnt == 0 &&
4292 hif_sc->runtime_timer_expires > 0) {
4293 del_timer(&hif_sc->runtime_timer);
4294 hif_sc->runtime_timer_expires = 0;
4295 }
4296
4297 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
4298
4299 return 0;
4300}
4301
/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);

	int ret = 0;
	unsigned long expires;
	unsigned long flags;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
				__func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* nudge past 0 on wraparound: expires == 0 means "timer not armed" */
	expires += !expires;

	spin_lock_irqsave(&hif_sc->runtime_lock, flags);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		mod_timer(&hif_sc->runtime_timer, expires);
		hif_sc->runtime_timer_expires = expires;
	}

	spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);

	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
		delay, ret);

	return ret;
}
4382
4383/**
4384 * hif_runtime_lock_init() - API to initialize Runtime PM context
4385 * @name: Context name
4386 *
4387 * This API initalizes the Runtime PM context of the caller and
4388 * return the pointer.
4389 *
4390 * Return: void *
4391 */
4392struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
4393{
4394 struct hif_pm_runtime_lock *context;
4395
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304396 context = qdf_mem_malloc(sizeof(*context));
Houston Hoffman9078a152015-11-02 16:15:02 -08004397 if (!context) {
4398 HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
4399 __func__);
4400 return NULL;
4401 }
4402
4403 context->name = name ? name : "Default";
4404 return context;
4405}
4406
4407/**
4408 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4409 * @data: Runtime PM context
4410 *
4411 * Return: void
4412 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05304413void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
Komal Seelam644263d2016-02-22 20:45:49 +05304414 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08004415{
4416 unsigned long flags;
4417 struct hif_pm_runtime_lock *context = data;
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004418 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004419
4420 if (!sc)
4421 return;
4422
4423 if (!context)
4424 return;
4425
4426 /*
4427 * Ensure to delete the context list entry and reduce the usage count
4428 * before freeing the context if context is active.
4429 */
4430 spin_lock_irqsave(&sc->runtime_lock, flags);
4431 __hif_pm_runtime_allow_suspend(sc, context);
4432 spin_unlock_irqrestore(&sc->runtime_lock, flags);
4433
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304434 qdf_mem_free(context);
Houston Hoffman9078a152015-11-02 16:15:02 -08004435}
Houston Hoffman9078a152015-11-02 16:15:02 -08004436#endif /* FEATURE_RUNTIME_PM */