/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#endif
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "if_pci_internal.h"
#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "pci_api.h"
#include "ahb_api.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
 * PCIe data bus errors. As a workaround for this issue, the reset
 * sequence uses a Target CPU warm reset instead of SOC_GLOBAL_RESET.
 */
#define CPU_WARM_RESET_WAR

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif

/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts to their tasklets
 * @scn: hif_softc context
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		if ((scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);

			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}

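/**
 * hif_pci_legacy_ce_interrupt_handler() - handler for legacy PCI line interrupts
 * @irq: irq number
 * @arg: hif_pci_softc context
 *
 * Masks and clears the legacy line interrupt, dumps PCI config space and
 * key registers if the SoC reads back as 0xdeadbeef, and then either
 * schedules the SSR tasklet (firmware event pending) or dispatches the
 * pending copy engine interrupts.
 *
 * Return: IRQ_HANDLED
 */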
irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
	volatile int tmp;
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: the INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise the interrupt cannot really be cleared
		 */
		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			       HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc, sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
					  val);

				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
					  val);

				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
					  bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80008),
					  hif_read32_mb(sc, sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80010),
					  hif_read32_mb(sc, sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80018),
					  hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}

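/**
 * hif_pci_targ_is_present() - check whether a target is present
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Return: 1; a real presence check is not implemented yet (see FIX THIS)
 */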
bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
	return 1; /* FIX THIS */
}

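/**
 * hif_get_irq_num() - report the irq numbers in use
 * @scn: hif context
 * @irq: caller-provided array to fill with irq numbers
 * @size: number of entries available in @irq
 *
 * For legacy or single-MSI operation only the base irq is reported;
 * otherwise one entry per MSI copy engine interrupt is filled in.
 *
 * Return: number of irqs filled in, or -EINVAL on bad input or a short buffer
 */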
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	int i = 0;

	if (!irq || !size) {
		return -EINVAL;
	}

	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
		irq[0] = sc->irq;
		return 1;
	}

	if (sc->num_msi_intrs > size) {
		qdf_print("Not enough space in irq buffer to return irqs");
		return -EINVAL;
	}

	for (i = 0; i < sc->num_msi_intrs; i++) {
		irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
	}

	return sc->num_msi_intrs;
}

/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_WAKE_ADDRESS,
				       PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

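/* Read/write a register in the PCIe local register space; the offset is
 * relative to PCIE_LOCAL_BASE_ADDRESS.
 */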
#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
		      PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
			    PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped target register space
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Return: true if the target's clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
			    + RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
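/**
 * hif_pci_device_reset() - cold reset the target via SOC_GLOBAL_RESET
 * @sc: hif pci context
 *
 * Wakes the target, asserts SOC_GLOBAL_RESET (which also resets PCIe),
 * waits for RTC_STATE to reflect the cold reset, then de-asserts the
 * reset and lets the target sleep again.
 */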
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here. This form of reset
	 * is integral to correct operation.
	 */

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here. This form of reset is
	 * integral to correct operation.
	 */

	if (!mem)
		return;

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(sc, mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val =
		hif_read32_mb(sc, mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, (mem +
			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb(sc, (mem +
			    (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS +
			       SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
		     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(sc, mem +
		       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		       val);

	/* Reset CE */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, (mem +
			    (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		       val);
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
				  SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		     __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
				  SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		     __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
}

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif

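/**
 * hif_check_soc_status() - check whether the SoC is reachable over PCIe
 * @hif_ctx: hif context
 *
 * Verifies the PCIe link by re-reading the device ID from config space,
 * then tries to wake the target and polls until it reports awake or
 * PCIE_WAKE_TIMEOUT expires, logging the relevant registers along the way.
 *
 * Return: 0 on success, -EACCES if the device is not accessible
 */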
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint16_t device_id = 0;
	uint32_t val;
	uint16_t timeout_count = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	/* Check device ID from PCIe configuration space for link status */
	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != sc->devid) {
		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
			  __func__, device_id, sc->devid);
		return -EACCES;
	}

	/* Check PCIe local register for bar/memory access */
	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			    RTC_STATE_ADDRESS);
	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);

	/* Try to wake up the target if it sleeps */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
		     hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
				   PCIE_SOC_WAKE_ADDRESS));

	/* Check if the target can be woken up */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
			HIF_ERROR("%s: wake up timeout, %08x, %08x",
				  __func__,
				  hif_read32_mb(sc, sc->mem +
						PCIE_LOCAL_BASE_ADDRESS +
						RTC_STATE_ADDRESS),
				  hif_read32_mb(sc, sc->mem +
						PCIE_LOCAL_BASE_ADDRESS +
						PCIE_SOC_WAKE_ADDRESS));
			return -EACCES;
		}

		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

		qdf_mdelay(100);
		timeout_count += 100;
	}

	/* Check Power register for SoC internal bus issues */
	val =
		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
			      SOC_POWER_REG_OFFSET);
	HIF_INFO_MED("%s: Power register is %08x", __func__, val);

	return 0;
}

/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers. The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val =
		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			      WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_CONTROL_OFFSET, val);

	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
		     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_INPUT_SEL_OFFSET),
		     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				   WLAN_DEBUG_CONTROL_OFFSET));

	HIF_INFO_MED("%s: Debug CE", __func__);
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
		       val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				    CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
			       CE_WRAPPER_DEBUG_OFFSET, val);

		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
			     __func__, wrapper_idx[i],
			     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					   AMBA_DEBUG_BUS_OFFSET),
			     hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
					   CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val =
					hif_read32_mb(sc, mem + ce_base +
						      CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(sc, mem + ce_base +
					       CE_DEBUG_OFFSET, val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA
				 */
				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
						    + WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
					     __func__, j,
					     hif_read32_mb(sc, mem + ce_base +
							   CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val =
				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					      WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			HIF_INFO_MED("%s: out: %x", __func__, val);
		}
	}

	HIF_INFO_MED("%s: Debug PCIe:", __func__);
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val =
			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				      AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
			       AMBA_DEBUG_BUS_OFFSET, val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val =
			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				      WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
			     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET), val,
			     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					   WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed", __func__);

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	return 0;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
		return;
	}

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}

	tasklet_schedule(&sc->intr_tq);
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
	qdf_create_work(0, &sc->reschedule_tasklet_work,
			reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

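/**
 * wlan_tasklet() - tasklet servicing legacy interrupts
 * @data: hif_pci_softc context, cast from the tasklet's data word
 *
 * Runs the firmware interrupt handler (on non-Adrastea targets), then
 * clears the tasklet-from-interrupt flag and the active tasklet count.
 */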
void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

#ifdef FEATURE_RUNTIME_PM
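/**
 * hif_pm_runtime_state_to_string() - convert a runtime pm state to a string
 * @state: HIF_PM_RUNTIME_STATE_* value
 *
 * Return: human readable name of the runtime pm state, for logging
 */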
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_INPROGRESS:
		return "INPROGRESS";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

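/* Print one pm_stats counter as a "name: value" line of the debugfs file */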
#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)

/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		  msg, atomic_read(&sc->dev->power.usage_count),
		  hif_pm_runtime_state_to_string(
			  atomic_read(&sc->pm_state)),
		  sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		  sc->dev->power.runtime_status,
		  sc->dev->power.runtime_error,
		  sc->dev->power.disable_depth,
		  sc->dev->power.autosuspend_delay);

	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
		  sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
		  sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
		  sc->pm_stats.allow_suspend,
		  sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		  sc->pm_stats.prevent_suspend_timeout,
		  sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("Suspended: %u, resumed: %u count",
		  sc->pm_stats.suspended,
		  sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
		  sc->pm_stats.suspend_err,
		  sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
		"SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;

	seq_printf(s, "%30s: %s\n", "Runtime PM state",
		   autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
		   sc->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
		   sc->pm_stats.last_busy_marker);

	usecs_age = qdf_get_log_timestamp_usecs() -
		sc->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   sc->pm_stats.last_busy_timestamp / 1000000,
		   sc->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&sc->runtime_lock);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_bh(&sc->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&sc->runtime_lock);

	return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
 * @inode: inode of the debugfs entry
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			   inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner = THIS_MODULE,
	.open = hif_pci_runtime_pm_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					    0400, NULL, sc,
					    &hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

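/**
 * hif_runtime_init() - enable runtime suspend for the device
 * @dev: pci device
 * @delay: autosuspend delay in milliseconds
 *
 * Configures autosuspend with the given delay and drops a usage count
 * reference (put_noidle) so that runtime suspend can actually occur.
 */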
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

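/**
 * hif_runtime_exit() - effectively disable runtime suspend again
 * @dev: pci device
 *
 * Takes back a usage count reference (get_noresume) and marks the
 * device active.
 */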
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
		return;

	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	qdf_timer_free(&sc->runtime_timer);
	/* doesn't wait for pending traffic, unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

1194/**
Houston Hoffman20968292016-03-23 17:55:47 -07001195 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1196 * @sc: pci context
1197 *
1198 * Ensure we have only one vote against runtime suspend before closing
1199 * the runtime suspend feature.
1200 *
1201 * all gets by the wlan driver should have been returned
1202 * one vote should remain as part of cnss_runtime_exit
1203 *
1204 * needs to be revisited if we share the root complex.
1205 */
1206static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1207{
Komal Seelam887b6482016-09-29 17:46:59 +05301208 struct hif_pm_runtime_lock *ctx, *tmp;
1209
Houston Hoffman20968292016-03-23 17:55:47 -07001210 if (atomic_read(&sc->dev->power.usage_count) != 1)
1211 hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
Komal Seelam887b6482016-09-29 17:46:59 +05301212 else
1213 return;
1214
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301215 spin_lock_bh(&sc->runtime_lock);
Komal Seelam887b6482016-09-29 17:46:59 +05301216 list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301217 spin_unlock_bh(&sc->runtime_lock);
Komal Seelam887b6482016-09-29 17:46:59 +05301218 hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301219 spin_lock_bh(&sc->runtime_lock);
Komal Seelam887b6482016-09-29 17:46:59 +05301220 }
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301221 spin_unlock_bh(&sc->runtime_lock);
Houston Hoffman20968292016-03-23 17:55:47 -07001222
1223	 /* Ensure one and only one usage count remains so that runtime pm
1224	  * is not left disabled when the wlan driver is re-insmodded, and
1225	  * so that the usage count never drops below 1 and breaks
1226	  * runtime pm.
1227	  */
1228 if (atomic_read(&sc->dev->power.usage_count) <= 0)
1229 atomic_set(&sc->dev->power.usage_count, 1);
1230 while (atomic_read(&sc->dev->power.usage_count) > 1)
1231 hif_pm_runtime_put_auto(sc->dev);
1232}
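/*
 * Illustrative sketch (assumed typical client usage, not copied from a caller
 * in this file): a client that pairs its prevent/allow calls leaves nothing
 * for hif_pm_runtime_sanitize_on_exit() to clean up:
 *
 *   qdf_runtime_lock_init(&lock);
 *   qdf_runtime_pm_prevent_suspend(&lock);   vote against runtime suspend
 *   ... work that must not race with runtime suspend ...
 *   qdf_runtime_pm_allow_suspend(&lock);     return the vote
 *   qdf_runtime_lock_deinit(&lock);
 */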
1233
Komal Seelam887b6482016-09-29 17:46:59 +05301234static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1235 struct hif_pm_runtime_lock *lock);
1236
1237/**
1238 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1239 * @sc: PCIe Context
1240 *
1241 * API is used to empty the runtime pm prevent suspend list.
1242 *
1243 * Return: void
1244 */
1245static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1246{
Komal Seelam887b6482016-09-29 17:46:59 +05301247 struct hif_pm_runtime_lock *ctx, *tmp;
1248
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301249 spin_lock_bh(&sc->runtime_lock);
Komal Seelam887b6482016-09-29 17:46:59 +05301250 list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07001251 __hif_pm_runtime_allow_suspend(sc, ctx);
Komal Seelam887b6482016-09-29 17:46:59 +05301252 }
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05301253 spin_unlock_bh(&sc->runtime_lock);
Komal Seelam887b6482016-09-29 17:46:59 +05301254}
1255
Houston Hoffman20968292016-03-23 17:55:47 -07001256/**
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001257 * hif_pm_runtime_close(): close runtime pm
1258 * @sc: pci bus handle
1259 *
1260 * ensure runtime_pm is stopped before closing the driver
1261 */
1262static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1263{
Komal Seelam887b6482016-09-29 17:46:59 +05301264 struct hif_softc *scn = HIF_GET_SOFTC(sc);
1265
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08001266 qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301267 if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001268 return;
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08001269
1270 hif_pm_runtime_stop(sc);
Houston Hoffman20968292016-03-23 17:55:47 -07001271
Komal Seelam887b6482016-09-29 17:46:59 +05301272 hif_is_recovery_in_progress(scn) ?
1273 hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1274 hif_pm_runtime_sanitize_on_exit(sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001275}
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001276#else
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001277static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1278static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1279static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
Houston Hoffman53b34c42015-11-18 15:51:32 -08001280static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001281#endif
1282
1283/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001284 * hif_disable_power_gating() - disable HW power gating
1285 * @hif_ctx: hif context
1286 *
1287 * disables pcie L1 power states
1288 */
1289static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1290{
1291 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1292 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1293
Jeff Johnson8d639a02019-03-18 09:51:11 -07001294 if (!scn) {
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001295 HIF_ERROR("%s: Could not disable ASPM scn is null",
1296 __func__);
1297 return;
1298 }
1299
1300 /* Disable ASPM when pkt log is enabled */
1301 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1302 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1303}
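/*
 * Sketch of the save/clear/restore pairing implemented by
 * hif_disable_power_gating()/hif_enable_power_gating(); offset 0x80 is the
 * link control register offset hard-coded by this file:
 *
 *   uint32_t lcr;
 *   pci_read_config_dword(pdev, 0x80, &lcr);           save current value
 *   pci_write_config_dword(pdev, 0x80, lcr & ~0xffU);  clear ASPM enable bits
 *   ... window where ASPM must stay disabled ...
 *   pci_write_config_dword(pdev, 0x80, lcr);           restore on enable
 */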
1304
1305/**
1306 * hif_enable_power_gating() - enable HW power gating
1307 * @sc: pci context
1308 *
1309 * enables pcie L1 power states
1310 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001311static void hif_enable_power_gating(struct hif_pci_softc *sc)
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001312{
Jeff Johnson8d639a02019-03-18 09:51:11 -07001313 if (!sc) {
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001314		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1315 __func__);
1316 return;
1317 }
1318
1319 /* Re-enable ASPM after firmware/OTP download is complete */
1320 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1321}
1322
1323/**
1324 * hif_pci_enable_power_management() - enable power management
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001325 * @hif_sc: hif context
1326 * @is_packet_log_enabled: true if packet log is enabled
Venkateswara Swamy Bandarue20c6dc2016-09-20 20:25:20 +05301327 * Enables runtime pm, ASPM (PCIe L1, via hif_enable_power_gating) and
1328 * re-enables soc sleep after driver load (hif_pci_target_sleep_state_adjust).
1329 *
1330 * note: epping mode does not call this function as it does not
1331 * care about saving power.
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001332 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001333void hif_pci_enable_power_management(struct hif_softc *hif_sc,
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001334 bool is_packet_log_enabled)
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001335{
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001336 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001337
Jeff Johnson8d639a02019-03-18 09:51:11 -07001338 if (!pci_ctx) {
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001339 HIF_ERROR("%s, hif_ctx null", __func__);
1340 return;
1341 }
1342
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001343 hif_pm_runtime_start(pci_ctx);
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001344
1345 if (!is_packet_log_enabled)
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001346 hif_enable_power_gating(pci_ctx);
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001347
1348 if (!CONFIG_ATH_PCIE_MAX_PERF &&
Houston Hoffman579c02f2017-08-02 01:57:38 -07001349 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1350 !ce_srng_based(hif_sc)) {
Venkateswara Swamy Bandarue20c6dc2016-09-20 20:25:20 +05301351 /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07001352 if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
Houston Hoffmanb861cb32016-03-14 21:11:46 -07001353 HIF_ERROR("%s, failed to set target to sleep",
1354 __func__);
1355 }
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001356}
1357
Houston Hoffman53b34c42015-11-18 15:51:32 -08001358/**
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07001359 * hif_pci_disable_power_management() - disable power management
Houston Hoffman53b34c42015-11-18 15:51:32 -08001360 * @hif_ctx: hif context
1361 *
1362 * Currently this only stops runtime pm. Should be updated to behave
1363 * correctly when runtime pm has not been started, and to take care
1364 * of ASPM and soc sleep for driver load.
1365 */
Houston Hoffmanb4149dd2016-03-23 15:55:41 -07001366void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
Houston Hoffman53b34c42015-11-18 15:51:32 -08001367{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301368 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman53b34c42015-11-18 15:51:32 -08001369
Jeff Johnson8d639a02019-03-18 09:51:11 -07001370 if (!pci_ctx) {
Houston Hoffman53b34c42015-11-18 15:51:32 -08001371 HIF_ERROR("%s, hif_ctx null", __func__);
1372 return;
1373 }
1374
Houston Hoffman53b34c42015-11-18 15:51:32 -08001375 hif_pm_runtime_stop(pci_ctx);
1376}
1377
Nirav Shahb70bd732016-05-25 14:31:51 +05301378void hif_pci_display_stats(struct hif_softc *hif_ctx)
1379{
1380 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1381
Jeff Johnson8d639a02019-03-18 09:51:11 -07001382 if (!pci_ctx) {
Nirav Shahb70bd732016-05-25 14:31:51 +05301383 HIF_ERROR("%s, hif_ctx null", __func__);
1384 return;
1385 }
1386 hif_display_ce_stats(&pci_ctx->ce_sc);
1387}
1388
1389void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1390{
1391 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1392
Jeff Johnson8d639a02019-03-18 09:51:11 -07001393 if (!pci_ctx) {
Nirav Shahb70bd732016-05-25 14:31:51 +05301394 HIF_ERROR("%s, hif_ctx null", __func__);
1395 return;
1396 }
1397 hif_clear_ce_stats(&pci_ctx->ce_sc);
1398}
1399
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001400#define ATH_PCI_PROBE_RETRY_MAX 3
1401/**
1402 * hif_pci_open(): open the pci bus
1403 * @hif_ctx: hif context
1404 * @bus_type: bus type
1405 *
1406 * Return: QDF_STATUS_SUCCESS on success
1407 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001408QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001409{
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001410 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001411
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001412 hif_ctx->bus_type = bus_type;
Houston Hoffman62aa58d2015-11-02 21:14:55 -08001413 hif_pm_runtime_open(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001414
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301415 qdf_spinlock_create(&sc->irq_lock);
Houston Hoffman8a13e5c2015-10-29 16:12:09 -07001416
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07001417 return hif_ce_open(hif_ctx);
Houston Hoffman108da402016-03-14 21:11:24 -07001418}
1419
Houston Hoffman108da402016-03-14 21:11:24 -07001420/**
Houston Hoffman854e67f2016-03-14 21:11:39 -07001421 * hif_wake_target_cpu() - wake the target's cpu
1422 * @scn: hif context
1423 *
1424 * Send an interrupt to the device to wake up the Target CPU
1425 * so it has an opportunity to notice any changed state.
1426 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001427static void hif_wake_target_cpu(struct hif_softc *scn)
Houston Hoffman854e67f2016-03-14 21:11:39 -07001428{
1429 QDF_STATUS rv;
1430 uint32_t core_ctrl;
1431 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1432
1433 rv = hif_diag_read_access(hif_hdl,
1434 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1435 &core_ctrl);
1436 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1437 /* A_INUM_FIRMWARE interrupt to Target CPU */
1438 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1439
1440 rv = hif_diag_write_access(hif_hdl,
1441 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1442 core_ctrl);
1443 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1444}
1445
Houston Hoffman63777f22016-03-14 21:11:49 -07001446/**
1447 * soc_wake_reset() - allow the target to go to sleep
1448 * @scn: hif_softc
1449 *
1450 * Clear the force wake register. This is done by
1451 * hif_sleep_entry to cancel the deferred timer sleep.
1452 */
1453static void soc_wake_reset(struct hif_softc *scn)
1454{
Nirav Shahf1e3fb52018-06-12 14:39:34 +05301455 hif_write32_mb(scn, scn->mem +
Houston Hoffman63777f22016-03-14 21:11:49 -07001456 PCIE_LOCAL_BASE_ADDRESS +
1457 PCIE_SOC_WAKE_ADDRESS,
1458 PCIE_SOC_WAKE_RESET);
1459}
1460
1461/**
1462 * hif_sleep_entry() - gate target sleep
1463 * @arg: hif context
1464 *
1465 * This function is the callback for the sleep timer.
1466 * Check if last force awake critical section was at least
1467 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
1468 * allow the target to go to sleep and cancel the sleep timer.
1469 * otherwise reschedule the sleep timer.
1470 */
1471static void hif_sleep_entry(void *arg)
1472{
1473 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1474 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1475 uint32_t idle_ms;
1476
1477 if (scn->recovery)
1478 return;
1479
1480 if (hif_is_driver_unloading(scn))
1481 return;
1482
1483 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1484 if (hif_state->verified_awake == false) {
1485 idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1486 - hif_state->sleep_ticks);
1487 if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1488 if (!qdf_atomic_read(&scn->link_suspended)) {
1489 soc_wake_reset(scn);
1490 hif_state->fake_sleep = false;
1491 }
1492 } else {
1493 qdf_timer_stop(&hif_state->sleep_timer);
1494 qdf_timer_start(&hif_state->sleep_timer,
1495 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1496 }
1497 } else {
1498 qdf_timer_stop(&hif_state->sleep_timer);
1499 qdf_timer_start(&hif_state->sleep_timer,
1500 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1501 }
1502 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1503}
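/*
 * Decision flow of hif_sleep_entry(), summarized (the code above is
 * authoritative):
 *
 *   verified_awake == true                   -> re-arm sleep_timer
 *   idle < HIF_MIN_SLEEP_INACTIVITY_TIME_MS  -> re-arm sleep_timer
 *   idle >= threshold, link not suspended    -> soc_wake_reset(), fake_sleep = false
 *   idle >= threshold, link suspended        -> do nothing
 */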
1504
Houston Hoffman854e67f2016-03-14 21:11:39 -07001505#define HIF_HIA_MAX_POLL_LOOP 1000000
1506#define HIF_HIA_POLLING_DELAY_MS 10
1507
Akshay Kosigi181b2f52018-11-26 17:02:54 +05301508#ifdef QCA_HIF_HIA_EXTND
1509
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001510static void hif_set_hia_extnd(struct hif_softc *scn)
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001511{
1512 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1513 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1514 uint32_t target_type = tgt_info->target_type;
1515
1516 HIF_TRACE("%s: E", __func__);
1517
1518 if ((target_type == TARGET_TYPE_AR900B) ||
1519 target_type == TARGET_TYPE_QCA9984 ||
1520 target_type == TARGET_TYPE_QCA9888) {
1521 /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07001522 * in RTC space
1523 */
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001524 tgt_info->target_revision
Nirav Shahf1e3fb52018-06-12 14:39:34 +05301525 = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001526 + CHIP_ID_ADDRESS));
Aditya Sathish648ce112018-07-02 16:41:39 +05301527 qdf_print("chip_id 0x%x chip_revision 0x%x",
1528 target_type, tgt_info->target_revision);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001529 }
1530
1531 {
1532 uint32_t flag2_value = 0;
1533 uint32_t flag2_targ_addr =
1534 host_interest_item_address(target_type,
1535 offsetof(struct host_interest_s, hi_skip_clock_init));
1536
1537 if ((ar900b_20_targ_clk != -1) &&
1538 (frac != -1) && (intval != -1)) {
1539 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1540 &flag2_value);
Aditya Sathish648ce112018-07-02 16:41:39 +05301541 qdf_print("\n Setting clk_override");
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001542 flag2_value |= CLOCK_OVERRIDE;
1543
1544 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1545 flag2_value);
Aditya Sathish648ce112018-07-02 16:41:39 +05301546 qdf_print("\n CLOCK PLL val set %d", flag2_value);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001547 } else {
Aditya Sathish648ce112018-07-02 16:41:39 +05301548 qdf_print("\n CLOCK PLL skipped");
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001549 }
1550 }
1551
1552 if (target_type == TARGET_TYPE_AR900B
1553 || target_type == TARGET_TYPE_QCA9984
1554 || target_type == TARGET_TYPE_QCA9888) {
1555
1556 /* for AR9980_2.0, 300 mhz clock is used, right now we assume
1557 * this would be supplied through module parameters,
1558 * if not supplied assumed default or same behavior as 1.0.
1559 * Assume 1.0 clock can't be tuned, reset to defaults
1560 */
1561
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07001562 qdf_print(KERN_INFO
Aditya Sathish648ce112018-07-02 16:41:39 +05301563 "%s: setting the target pll frac %x intval %x",
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001564 __func__, frac, intval);
1565
1566 /* do not touch frac, and int val, let them be default -1,
1567 * if desired, host can supply these through module params
1568 */
1569 if (frac != -1 || intval != -1) {
1570 uint32_t flag2_value = 0;
1571 uint32_t flag2_targ_addr;
1572
1573 flag2_targ_addr =
1574 host_interest_item_address(target_type,
1575 offsetof(struct host_interest_s,
1576 hi_clock_info));
1577 hif_diag_read_access(hif_hdl,
1578 flag2_targ_addr, &flag2_value);
Aditya Sathish648ce112018-07-02 16:41:39 +05301579 qdf_print("\n ====> FRAC Val %x Address %x", frac,
1580 flag2_value);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001581 hif_diag_write_access(hif_hdl, flag2_value, frac);
Aditya Sathish648ce112018-07-02 16:41:39 +05301582 qdf_print("\n INT Val %x Address %x",
1583 intval, flag2_value + 4);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001584 hif_diag_write_access(hif_hdl,
1585 flag2_value + 4, intval);
1586 } else {
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07001587 qdf_print(KERN_INFO
Aditya Sathish648ce112018-07-02 16:41:39 +05301588 "%s: no frac provided, skipping pre-configuring PLL",
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001589 __func__);
1590 }
1591
1592 /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1593 if ((target_type == TARGET_TYPE_AR900B)
1594 && (tgt_info->target_revision == AR900B_REV_2)
1595 && ar900b_20_targ_clk != -1) {
1596 uint32_t flag2_value = 0;
1597 uint32_t flag2_targ_addr;
1598
1599 flag2_targ_addr
1600 = host_interest_item_address(target_type,
1601 offsetof(struct host_interest_s,
1602 hi_desired_cpu_speed_hz));
1603 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1604 &flag2_value);
Aditya Sathish648ce112018-07-02 16:41:39 +05301605 qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001606 flag2_value);
1607 hif_diag_write_access(hif_hdl, flag2_value,
1608 ar900b_20_targ_clk/*300000000u*/);
1609 } else if (target_type == TARGET_TYPE_QCA9888) {
1610 uint32_t flag2_targ_addr;
1611
1612 if (200000000u != qca9888_20_targ_clk) {
1613 qca9888_20_targ_clk = 300000000u;
1614 /* Setting the target clock speed to 300 mhz */
1615 }
1616
1617 flag2_targ_addr
1618 = host_interest_item_address(target_type,
1619 offsetof(struct host_interest_s,
1620 hi_desired_cpu_speed_hz));
1621 hif_diag_write_access(hif_hdl, flag2_targ_addr,
1622 qca9888_20_targ_clk);
1623 } else {
Aditya Sathish648ce112018-07-02 16:41:39 +05301624 qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001625 __func__);
1626 }
1627 } else {
1628 if (frac != -1 || intval != -1) {
1629 uint32_t flag2_value = 0;
1630 uint32_t flag2_targ_addr =
1631 host_interest_item_address(target_type,
1632 offsetof(struct host_interest_s,
1633 hi_clock_info));
1634 hif_diag_read_access(hif_hdl, flag2_targ_addr,
1635 &flag2_value);
Aditya Sathish648ce112018-07-02 16:41:39 +05301636 qdf_print("\n ====> FRAC Val %x Address %x", frac,
1637 flag2_value);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001638 hif_diag_write_access(hif_hdl, flag2_value, frac);
Aditya Sathish648ce112018-07-02 16:41:39 +05301639 qdf_print("\n INT Val %x Address %x", intval,
1640 flag2_value + 4);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001641 hif_diag_write_access(hif_hdl, flag2_value + 4,
Aditya Sathish648ce112018-07-02 16:41:39 +05301642 intval);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001643 }
1644 }
1645}
1646
1647#else
1648
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001649static void hif_set_hia_extnd(struct hif_softc *scn)
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001650{
1651}
1652
1653#endif
1654
Houston Hoffman854e67f2016-03-14 21:11:39 -07001655/**
1656 * hif_set_hia() - fill out the host interest area
1657 * @scn: hif context
1658 *
1659 * This is replaced by hif_wlan_enable for integrated targets.
1660 * This fills out the host interest area. The firmware will
1661 * process these memory addresses when it is first brought out
1662 * of reset.
1663 *
1664 * Return: 0 for success.
1665 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001666static int hif_set_hia(struct hif_softc *scn)
Houston Hoffman854e67f2016-03-14 21:11:39 -07001667{
1668 QDF_STATUS rv;
1669 uint32_t interconnect_targ_addr = 0;
1670 uint32_t pcie_state_targ_addr = 0;
1671 uint32_t pipe_cfg_targ_addr = 0;
1672 uint32_t svc_to_pipe_map = 0;
1673 uint32_t pcie_config_flags = 0;
1674 uint32_t flag2_value = 0;
1675 uint32_t flag2_targ_addr = 0;
1676#ifdef QCA_WIFI_3_0
1677 uint32_t host_interest_area = 0;
1678 uint8_t i;
1679#else
1680 uint32_t ealloc_value = 0;
1681 uint32_t ealloc_targ_addr = 0;
1682 uint8_t banks_switched = 1;
1683 uint32_t chip_id;
1684#endif
1685 uint32_t pipe_cfg_addr;
1686 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1687 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1688 uint32_t target_type = tgt_info->target_type;
Houston Hoffman748e1a62017-03-30 17:20:42 -07001689 uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
Houston Hoffman854e67f2016-03-14 21:11:39 -07001690 static struct CE_pipe_config *target_ce_config;
1691 struct service_to_pipe *target_service_to_ce_map;
1692
1693 HIF_TRACE("%s: E", __func__);
1694
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301695 hif_get_target_ce_config(scn,
1696 &target_ce_config, &target_ce_config_sz,
Houston Hoffman854e67f2016-03-14 21:11:39 -07001697 &target_service_to_ce_map,
1698 &target_service_to_ce_map_sz,
1699 NULL, NULL);
1700
1701 if (ADRASTEA_BU)
1702 return QDF_STATUS_SUCCESS;
1703
1704#ifdef QCA_WIFI_3_0
1705 i = 0;
1706 while (i < HIF_HIA_MAX_POLL_LOOP) {
Nirav Shahf1e3fb52018-06-12 14:39:34 +05301707 host_interest_area = hif_read32_mb(scn, scn->mem +
Houston Hoffman854e67f2016-03-14 21:11:39 -07001708 A_SOC_CORE_SCRATCH_0_ADDRESS);
1709 if ((host_interest_area & 0x01) == 0) {
1710 qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1711 host_interest_area = 0;
1712 i++;
1713 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1714 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1715 } else {
1716 host_interest_area &= (~0x01);
Nirav Shahf1e3fb52018-06-12 14:39:34 +05301717 hif_write32_mb(scn, scn->mem + 0x113014, 0);
Houston Hoffman854e67f2016-03-14 21:11:39 -07001718 break;
1719 }
1720 }
1721
1722 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1723 HIF_ERROR("%s: hia polling timeout", __func__);
1724 return -EIO;
1725 }
1726
1727 if (host_interest_area == 0) {
1728 HIF_ERROR("%s: host_interest_area = 0", __func__);
1729 return -EIO;
1730 }
1731
1732 interconnect_targ_addr = host_interest_area +
1733 offsetof(struct host_interest_area_t,
1734 hi_interconnect_state);
1735
1736 flag2_targ_addr = host_interest_area +
1737 offsetof(struct host_interest_area_t, hi_option_flag2);
1738
1739#else
1740 interconnect_targ_addr = hif_hia_item_address(target_type,
1741 offsetof(struct host_interest_s, hi_interconnect_state));
1742 ealloc_targ_addr = hif_hia_item_address(target_type,
1743 offsetof(struct host_interest_s, hi_early_alloc));
1744 flag2_targ_addr = hif_hia_item_address(target_type,
1745 offsetof(struct host_interest_s, hi_option_flag2));
1746#endif
1747 /* Supply Target-side CE configuration */
1748 rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1749 &pcie_state_targ_addr);
1750 if (rv != QDF_STATUS_SUCCESS) {
1751 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1752 __func__, interconnect_targ_addr, rv);
1753 goto done;
1754 }
1755 if (pcie_state_targ_addr == 0) {
1756 rv = QDF_STATUS_E_FAILURE;
1757 HIF_ERROR("%s: pcie state addr is 0", __func__);
1758 goto done;
1759 }
1760 pipe_cfg_addr = pcie_state_targ_addr +
1761 offsetof(struct pcie_state_s,
1762 pipe_cfg_addr);
1763 rv = hif_diag_read_access(hif_hdl,
1764 pipe_cfg_addr,
1765 &pipe_cfg_targ_addr);
1766 if (rv != QDF_STATUS_SUCCESS) {
1767 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1768 __func__, pipe_cfg_addr, rv);
1769 goto done;
1770 }
1771 if (pipe_cfg_targ_addr == 0) {
1772 rv = QDF_STATUS_E_FAILURE;
1773 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1774 goto done;
1775 }
1776
1777 rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1778 (uint8_t *) target_ce_config,
1779 target_ce_config_sz);
1780
1781 if (rv != QDF_STATUS_SUCCESS) {
1782 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1783 goto done;
1784 }
1785
1786 rv = hif_diag_read_access(hif_hdl,
1787 pcie_state_targ_addr +
1788 offsetof(struct pcie_state_s,
1789 svc_to_pipe_map),
1790 &svc_to_pipe_map);
1791 if (rv != QDF_STATUS_SUCCESS) {
1792 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1793 goto done;
1794 }
1795 if (svc_to_pipe_map == 0) {
1796 rv = QDF_STATUS_E_FAILURE;
1797 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1798 goto done;
1799 }
1800
1801 rv = hif_diag_write_mem(hif_hdl,
1802 svc_to_pipe_map,
1803 (uint8_t *) target_service_to_ce_map,
1804 target_service_to_ce_map_sz);
1805 if (rv != QDF_STATUS_SUCCESS) {
1806 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1807 goto done;
1808 }
1809
1810 rv = hif_diag_read_access(hif_hdl,
1811 pcie_state_targ_addr +
1812 offsetof(struct pcie_state_s,
1813 config_flags),
1814 &pcie_config_flags);
1815 if (rv != QDF_STATUS_SUCCESS) {
1816 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1817 goto done;
1818 }
1819#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1820 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1821#else
1822 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1823#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1824 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1825#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1826 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1827#endif
1828 rv = hif_diag_write_mem(hif_hdl,
1829 pcie_state_targ_addr +
1830 offsetof(struct pcie_state_s,
1831 config_flags),
1832 (uint8_t *) &pcie_config_flags,
1833 sizeof(pcie_config_flags));
1834 if (rv != QDF_STATUS_SUCCESS) {
1835 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1836 goto done;
1837 }
1838
1839#ifndef QCA_WIFI_3_0
1840 /* configure early allocation */
1841 ealloc_targ_addr = hif_hia_item_address(target_type,
1842 offsetof(
1843 struct host_interest_s,
1844 hi_early_alloc));
1845
1846 rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1847 &ealloc_value);
1848 if (rv != QDF_STATUS_SUCCESS) {
1849 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1850 goto done;
1851 }
1852
1853 /* 1 bank is switched to IRAM, except ROME 1.0 */
1854 ealloc_value |=
1855 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1856 HI_EARLY_ALLOC_MAGIC_MASK);
1857
1858 rv = hif_diag_read_access(hif_hdl,
1859 CHIP_ID_ADDRESS |
1860 RTC_SOC_BASE_ADDRESS, &chip_id);
1861 if (rv != QDF_STATUS_SUCCESS) {
1862 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1863 goto done;
1864 }
1865 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1866 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1867 switch (CHIP_ID_REVISION_GET(chip_id)) {
1868 case 0x2: /* ROME 1.3 */
1869 /* 2 banks are switched to IRAM */
1870 banks_switched = 2;
1871 break;
1872 case 0x4: /* ROME 2.1 */
1873 case 0x5: /* ROME 2.2 */
1874 banks_switched = 6;
1875 break;
1876 case 0x8: /* ROME 3.0 */
1877 case 0x9: /* ROME 3.1 */
1878 case 0xA: /* ROME 3.2 */
1879 banks_switched = 9;
1880 break;
1881 case 0x0: /* ROME 1.0 */
1882 case 0x1: /* ROME 1.1 */
1883 default:
1884 /* 3 banks are switched to IRAM */
1885 banks_switched = 3;
1886 break;
1887 }
1888 }
1889
1890 ealloc_value |=
1891 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1892 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1893
1894 rv = hif_diag_write_access(hif_hdl,
1895 ealloc_targ_addr,
1896 ealloc_value);
1897 if (rv != QDF_STATUS_SUCCESS) {
1898 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1899 goto done;
1900 }
1901#endif
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001902 if ((target_type == TARGET_TYPE_AR900B)
1903 || (target_type == TARGET_TYPE_QCA9984)
1904 || (target_type == TARGET_TYPE_QCA9888)
Aravind Narasimhane79befa2016-06-24 12:03:15 +05301905 || (target_type == TARGET_TYPE_AR9888)) {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001906 hif_set_hia_extnd(scn);
1907 }
Houston Hoffman854e67f2016-03-14 21:11:39 -07001908
1909 /* Tell Target to proceed with initialization */
1910 flag2_targ_addr = hif_hia_item_address(target_type,
1911 offsetof(
1912 struct host_interest_s,
1913 hi_option_flag2));
1914
1915 rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1916 &flag2_value);
1917 if (rv != QDF_STATUS_SUCCESS) {
1918 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1919 goto done;
1920 }
1921
1922 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1923 rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1924 flag2_value);
1925 if (rv != QDF_STATUS_SUCCESS) {
1926 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1927 goto done;
1928 }
1929
1930 hif_wake_target_cpu(scn);
1931
1932done:
1933
1934 return rv;
1935}
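/*
 * Summary of the host-interest programming done by hif_set_hia() via diag
 * reads/writes (the code above is authoritative):
 *
 *   1. locate the host interest area (scratch-register poll on QCA_WIFI_3_0,
 *      fixed hi_* item addresses otherwise)
 *   2. write the target-side CE pipe configuration
 *   3. write the service-to-pipe map
 *   4. patch pcie_state config_flags (L1 clock gating, clk switch wait)
 *   5. program early allocation / IRAM bank switching (pre-3.0 targets only)
 *   6. set HI_OPTION_EARLY_CFG_DONE and wake the target CPU
 */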
1936
1937/**
Houston Hoffman108da402016-03-14 21:11:24 -07001938 * hif_pci_bus_configure() - configure the pcie bus
1939 * @hif_sc: pointer to the hif context.
1940 *
1941 * return: 0 for success. nonzero for failure.
1942 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07001943int hif_pci_bus_configure(struct hif_softc *hif_sc)
Houston Hoffman108da402016-03-14 21:11:24 -07001944{
1945 int status = 0;
Houston Hoffman63777f22016-03-14 21:11:49 -07001946 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Pratik Gandhi815c6d82016-10-19 12:06:32 +05301947 struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07001948
Houston Hoffman108da402016-03-14 21:11:24 -07001949 hif_ce_prepare_config(hif_sc);
1950
Houston Hoffman63777f22016-03-14 21:11:49 -07001951 /* initialize sleep state adjust variables */
1952 hif_state->sleep_timer_init = true;
1953 hif_state->keep_awake_count = 0;
1954 hif_state->fake_sleep = false;
1955 hif_state->sleep_ticks = 0;
1956
1957 qdf_timer_init(NULL, &hif_state->sleep_timer,
1958 hif_sleep_entry, (void *)hif_state,
1959 QDF_TIMER_TYPE_WAKE_APPS);
1960 hif_state->sleep_timer_init = true;
1961
Houston Hoffmana15d0b02016-11-23 15:10:15 -08001962 status = hif_wlan_enable(hif_sc);
1963 if (status) {
1964 HIF_ERROR("%s: hif_wlan_enable error = %d",
1965 __func__, status);
1966 goto timer_free;
Houston Hoffman108da402016-03-14 21:11:24 -07001967 }
1968
1969 A_TARGET_ACCESS_LIKELY(hif_sc);
Houston Hoffmanf7718622016-03-14 21:11:37 -07001970
Houston Hoffman579c02f2017-08-02 01:57:38 -07001971 if ((CONFIG_ATH_PCIE_MAX_PERF ||
1972 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1973 !ce_srng_based(hif_sc)) {
Venkateswara Swamy Bandarue20c6dc2016-09-20 20:25:20 +05301974 /*
1975 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1976 * prevent sleep when we want to keep firmware always awake
1977 * note: when we want to keep firmware always awake,
1978 * hif_target_sleep_state_adjust will point to a dummy
1979 * function, and hif_pci_target_sleep_state_adjust must
1980 * be called instead.
1981 * note: bus type check is here because AHB bus is reusing
1982 * hif_pci_bus_configure code.
1983 */
1984 if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001985 if (hif_pci_target_sleep_state_adjust(hif_sc,
1986 false, true) < 0) {
1987 status = -EACCES;
1988 goto disable_wlan;
1989 }
Houston Hoffmanf7718622016-03-14 21:11:37 -07001990 }
1991 }
1992
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001993 /* todo: consider replacing this with an srng field */
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05301994 if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05301995 (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1996 (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05301997 (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05301998 hif_sc->per_ce_irq = true;
1999 }
2000
Houston Hoffman108da402016-03-14 21:11:24 -07002001 status = hif_config_ce(hif_sc);
2002 if (status)
2003 goto disable_wlan;
2004
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302005 /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
2006 if (hif_needs_bmi(hif_osc)) {
2007 status = hif_set_hia(hif_sc);
2008 if (status)
2009 goto unconfig_ce;
Houston Hoffman108da402016-03-14 21:11:24 -07002010
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302011 HIF_INFO_MED("%s: hif_set_hia done", __func__);
Houston Hoffman108da402016-03-14 21:11:24 -07002012
Pratik Gandhi815c6d82016-10-19 12:06:32 +05302013 }
Houston Hoffman108da402016-03-14 21:11:24 -07002014
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05302015 if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05302016 (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2017 (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05302018 (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002019 HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2020 __func__);
2021 else {
2022 status = hif_configure_irq(hif_sc);
2023 if (status < 0)
2024 goto unconfig_ce;
2025 }
Houston Hoffman108da402016-03-14 21:11:24 -07002026
2027 A_TARGET_ACCESS_UNLIKELY(hif_sc);
2028
2029 return status;
2030
2031unconfig_ce:
2032 hif_unconfig_ce(hif_sc);
2033disable_wlan:
2034 A_TARGET_ACCESS_UNLIKELY(hif_sc);
Houston Hoffmana15d0b02016-11-23 15:10:15 -08002035 hif_wlan_disable(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002036
Houston Hoffman63777f22016-03-14 21:11:49 -07002037timer_free:
2038 qdf_timer_stop(&hif_state->sleep_timer);
2039 qdf_timer_free(&hif_state->sleep_timer);
2040 hif_state->sleep_timer_init = false;
2041
Houston Hoffman108da402016-03-14 21:11:24 -07002042 HIF_ERROR("%s: failed, status = %d", __func__, status);
2043 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002044}
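/*
 * Error unwinding in hif_pci_bus_configure() mirrors the setup order:
 *
 *   hif_wlan_enable() fails               -> timer_free
 *   sleep-state adjust or hif_config_ce() -> disable_wlan -> timer_free
 *   hif_set_hia() or hif_configure_irq()  -> unconfig_ce -> disable_wlan -> timer_free
 */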
2045
2046/**
2047 * hif_pci_close(): close the pci bus
2048 * @hif_sc: hif context
2049 * Return: n/a
2050 */
Houston Hoffman32bc8eb2016-03-14 21:11:34 -07002051void hif_pci_close(struct hif_softc *hif_sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002052{
Houston Hoffman108da402016-03-14 21:11:24 -07002053 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002054
Houston Hoffman108da402016-03-14 21:11:24 -07002055 hif_pm_runtime_close(hif_pci_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002056 hif_ce_close(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002057}
2058
2059#define BAR_NUM 0
2060
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302061static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2062 struct pci_dev *pdev,
2063 const struct pci_device_id *id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002064{
2065 void __iomem *mem;
2066 int ret = 0;
jiad8245c032018-01-03 12:35:39 +08002067 uint16_t device_id = 0;
Komal Seelam644263d2016-02-22 20:45:49 +05302068 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002069
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302070 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2071 if (device_id != id->device) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002072 HIF_ERROR(
2073 "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2074 __func__, device_id, id->device);
2075		/* pci link is down, so returning with error code */
2076 return -EIO;
2077 }
2078
2079 /* FIXME: temp. commenting out assign_resource
2080 * call for dev_attach to work on 2.6.38 kernel
2081 */
Amar Singhal901e33f2015-10-08 11:55:32 -07002082#if (!defined(__LINUX_ARM_ARCH__))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002083 if (pci_assign_resource(pdev, BAR_NUM)) {
2084 HIF_ERROR("%s: pci_assign_resource error", __func__);
2085 return -EIO;
2086 }
2087#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002088 if (pci_enable_device(pdev)) {
2089 HIF_ERROR("%s: pci_enable_device error",
2090 __func__);
2091 return -EIO;
2092 }
2093
2094 /* Request MMIO resources */
2095 ret = pci_request_region(pdev, BAR_NUM, "ath");
2096 if (ret) {
2097 HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2098 ret = -EIO;
2099 goto err_region;
2100 }
Houston Hoffmand0620a32016-11-09 20:44:56 -08002101
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002102#ifdef CONFIG_ARM_LPAE
2103	/* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit DMA mask
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002104	 * even for 32-bit devices.
2105 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002106 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2107 if (ret) {
2108 HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2109 goto err_dma;
2110 }
2111 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2112 if (ret) {
2113 HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2114 goto err_dma;
2115 }
2116#else
2117 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2118 if (ret) {
2119 HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2120 goto err_dma;
2121 }
2122 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2123 if (ret) {
2124 HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2125 __func__);
2126 goto err_dma;
2127 }
2128#endif
2129
2130 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2131
2132 /* Set bus master bit in PCI_COMMAND to enable DMA */
2133 pci_set_master(pdev);
2134
2135 /* Arrange for access to Target SoC registers. */
2136 mem = pci_iomap(pdev, BAR_NUM, 0);
2137 if (!mem) {
2138 HIF_ERROR("%s: PCI iomap error", __func__);
2139 ret = -EIO;
2140 goto err_iomap;
2141 }
Houston Hoffmanf7bc3082016-10-17 19:52:55 -07002142
Venkata Sharath Chandra Manchala79860aa2018-06-12 15:16:36 -07002143 HIF_INFO("*****BAR is %pK\n", (void *)mem);
Houston Hoffmanf7bc3082016-10-17 19:52:55 -07002144
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002145 sc->mem = mem;
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002146
Karunakar Dasineni8a8afe22016-10-18 13:10:13 -07002147 /* Hawkeye emulation specific change */
2148 if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
Sathish Kumar2d2f19a2017-02-13 15:52:07 +05302149 (device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2150 (device_id == RUMIM2M_DEVICE_ID_NODE2) ||
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05302151 (device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2152 (device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2153 (device_id == RUMIM2M_DEVICE_ID_NODE5)) {
Karunakar Dasineni8a8afe22016-10-18 13:10:13 -07002154 mem = mem + 0x0c000000;
2155 sc->mem = mem;
Jeff Johnsonb9450212017-09-18 10:12:38 -07002156 HIF_INFO("%s: Changing PCI mem base to %pK\n",
Karunakar Dasineni8a8afe22016-10-18 13:10:13 -07002157 __func__, sc->mem);
2158 }
2159
Surabhi Vishnoi6f752b42017-08-31 17:54:50 +05302160 sc->mem_len = pci_resource_len(pdev, BAR_NUM);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002161 ol_sc->mem = mem;
Houston Hoffmand0620a32016-11-09 20:44:56 -08002162 ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002163 sc->pci_enabled = true;
2164 return ret;
2165
2166err_iomap:
2167 pci_clear_master(pdev);
2168err_dma:
2169 pci_release_region(pdev, BAR_NUM);
2170err_region:
2171 pci_disable_device(pdev);
2172 return ret;
2173}
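/*
 * On recent kernels the pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * pairs above can be collapsed into one helper; a hedged sketch of the usual
 * 64-bit-with-32-bit-fallback pattern (not what this file does today):
 *
 *   if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *           ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */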
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302174
2175static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2176 struct pci_dev *pdev,
2177 const struct pci_device_id *id)
Houston Hoffmand0620a32016-11-09 20:44:56 -08002178{
2179 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2180 sc->pci_enabled = true;
2181 return 0;
2182}
Houston Hoffmand0620a32016-11-09 20:44:56 -08002183
2184
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302185static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
Houston Hoffmand0620a32016-11-09 20:44:56 -08002186{
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302187 pci_disable_msi(sc->pdev);
Houston Hoffmand0620a32016-11-09 20:44:56 -08002188 pci_iounmap(sc->pdev, sc->mem);
2189 pci_clear_master(sc->pdev);
2190 pci_release_region(sc->pdev, BAR_NUM);
2191 pci_disable_device(sc->pdev);
2192}
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302193
2194static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002195
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002196static void hif_disable_pci(struct hif_pci_softc *sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002197{
Komal Seelam644263d2016-02-22 20:45:49 +05302198 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2199
Jeff Johnson8d639a02019-03-18 09:51:11 -07002200 if (!ol_sc) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002201 HIF_ERROR("%s: ol_sc = NULL", __func__);
2202 return;
2203 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002204 hif_pci_device_reset(sc);
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302205 sc->hif_pci_deinit(sc);
Houston Hoffmand0620a32016-11-09 20:44:56 -08002206
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002207 sc->mem = NULL;
2208 ol_sc->mem = NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002209}
2210
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002211static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002212{
2213 int ret = 0;
2214 int targ_awake_limit = 500;
2215#ifndef QCA_WIFI_3_0
2216 uint32_t fw_indicator;
2217#endif
Komal Seelam644263d2016-02-22 20:45:49 +05302218 struct hif_softc *scn = HIF_GET_SOFTC(sc);
2219
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002220 /*
2221 * Verify that the Target was started cleanly.*
2222 * The case where this is most likely is with an AUX-powered
2223 * Target and a Host in WoW mode. If the Host crashes,
2224 * loses power, or is restarted (without unloading the driver)
2225 * then the Target is left (aux) powered and running. On a
2226 * subsequent driver load, the Target is in an unexpected state.
2227 * We try to catch that here in order to reset the Target and
2228 * retry the probe.
2229 */
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302230 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002231 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2232 while (!hif_targ_is_awake(scn, sc->mem)) {
2233 if (0 == targ_awake_limit) {
2234 HIF_ERROR("%s: target awake timeout", __func__);
2235 ret = -EAGAIN;
2236 goto end;
2237 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302238 qdf_mdelay(1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002239 targ_awake_limit--;
2240 }
2241
2242#if PCIE_BAR0_READY_CHECKING
2243 {
2244 int wait_limit = 200;
2245 /* Synchronization point: wait the BAR0 is configured */
2246 while (wait_limit-- &&
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302247		       !(hif_read32_mb(sc, sc->mem +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002248 PCIE_LOCAL_BASE_ADDRESS +
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002249 PCIE_SOC_RDY_STATUS_ADDRESS)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002250 & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302251 qdf_mdelay(10);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002252 }
2253 if (wait_limit < 0) {
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002254 /* AR6320v1 doesn't support checking of BAR0
2255			 * configuration; wait up to two seconds for BAR0 to be ready
2256 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002257 HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2258 __func__);
2259 }
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002260 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002261#endif
2262
2263#ifndef QCA_WIFI_3_0
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302264 fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2265 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002266 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2267
2268 if (fw_indicator & FW_IND_INITIALIZED) {
2269 HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2270 __func__);
2271 ret = -EAGAIN;
2272 goto end;
2273 }
2274#endif
2275
2276end:
2277 return ret;
2278}
2279
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002280static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2281{
2282 int ret = 0;
Komal Seelam644263d2016-02-22 20:45:49 +05302283 struct hif_softc *scn = HIF_GET_SOFTC(sc);
Aravind Narasimhane79befa2016-06-24 12:03:15 +05302284 uint32_t target_type = scn->target_info.target_type;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002285
2286 HIF_TRACE("%s: E", __func__);
2287
2288	/* do not support MSI, or MSI IRQ failed */
2289 tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2290 ret = request_irq(sc->pdev->irq,
Yun Park3fb36442017-08-17 17:37:53 -07002291 hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002292 "wlan_pci", sc);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302293 if (ret) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002294 HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2295 goto end;
2296 }
Dustin Brown2af3d672017-05-30 16:14:01 -07002297 scn->wake_irq = sc->pdev->irq;
Houston Hoffman3db96a42016-05-05 19:54:39 -07002298	/* Use sc->irq instead of sc->pdev->irq
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002299 * platform_device pdev doesn't have an irq field
2300 */
Houston Hoffman3db96a42016-05-05 19:54:39 -07002301 sc->irq = sc->pdev->irq;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002302 /* Use Legacy PCI Interrupts */
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302303 hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002304 PCIE_INTR_ENABLE_ADDRESS),
2305 HOST_GROUP0_MASK);
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302306 hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002307 PCIE_INTR_ENABLE_ADDRESS));
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302308 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002309 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
Aravind Narasimhane79befa2016-06-24 12:03:15 +05302310
2311 if ((target_type == TARGET_TYPE_IPQ4019) ||
2312 (target_type == TARGET_TYPE_AR900B) ||
2313 (target_type == TARGET_TYPE_QCA9984) ||
2314 (target_type == TARGET_TYPE_AR9888) ||
wadesongc79aed02017-04-19 19:43:21 +08002315 (target_type == TARGET_TYPE_QCA9888) ||
2316 (target_type == TARGET_TYPE_AR6320V1) ||
2317 (target_type == TARGET_TYPE_AR6320V2) ||
2318 (target_type == TARGET_TYPE_AR6320V3)) {
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302319 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
Aravind Narasimhane79befa2016-06-24 12:03:15 +05302320 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2321 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002322end:
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302323 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002324 "%s: X, ret = %d", __func__, ret);
2325 return ret;
2326}
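/*
 * Summary of the legacy (INTx) interrupt bring-up above:
 *
 *   1. tasklet_init() the CE tasklet and request_irq() on pdev->irq (shared)
 *   2. record the line as both scn->wake_irq and sc->irq
 *   3. unmask HOST_GROUP0 in PCIE_INTR_ENABLE and flush with a read-back
 *   4. write PCIE_SOC_WAKE_RESET, then re-assert PCIE_SOC_WAKE_V_MASK for
 *      the listed legacy targets
 */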
2327
Houston Hoffman15010772016-09-16 14:01:13 -07002328static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2329{
2330 int ret;
2331 int ce_id, irq;
2332 uint32_t msi_data_start;
2333 uint32_t msi_data_count;
2334 uint32_t msi_irq_start;
2335 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2336
2337 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2338 &msi_data_count, &msi_data_start,
2339 &msi_irq_start);
2340 if (ret)
2341 return ret;
2342
2343 /* needs to match the ce_id -> irq data mapping
2344 * used in the srng parameter configuration
2345 */
2346 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2347 unsigned int msi_data;
Yun Parkb5e74bb2017-09-19 20:13:48 -07002348
2349 if (!ce_sc->tasklets[ce_id].inited)
2350 continue;
2351
Houston Hoffman15010772016-09-16 14:01:13 -07002352 msi_data = (ce_id % msi_data_count) + msi_irq_start;
2353 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2354
Rakesh Pillai51264a62019-05-08 19:15:56 +05302355 hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
Houston Hoffman15010772016-09-16 14:01:13 -07002356 ce_id, msi_data, irq);
2357
2358 free_irq(irq, &ce_sc->tasklets[ce_id]);
2359 }
2360
2361 return ret;
2362}
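/*
 * Worked example of the ce_id -> msi_data -> irq mapping this free path must
 * match (values illustrative): with msi_data_count = 3 and msi_irq_start = 1,
 *
 *   ce_id 0 -> msi_data 1,  ce_id 1 -> msi_data 2,  ce_id 2 -> msi_data 3,
 *   ce_id 3 -> msi_data 1,  ...
 *
 * and the line to free is irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data).
 */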
2363
Houston Hoffman648a9182017-05-21 23:27:50 -07002364static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2365{
2366 int i, j, irq;
2367 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2368 struct hif_exec_context *hif_ext_group;
2369
2370 for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2371 hif_ext_group = hif_state->hif_ext_group[i];
2372 if (hif_ext_group->irq_requested) {
2373 hif_ext_group->irq_requested = false;
2374 for (j = 0; j < hif_ext_group->numirq; j++) {
2375 irq = hif_ext_group->os_irq[j];
2376 free_irq(irq, hif_ext_group);
2377 }
2378 hif_ext_group->numirq = 0;
2379 }
2380 }
2381}
2382
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002383/**
2384 * hif_pci_nointrs(): disable IRQs
2385 *
2386 * This function stops interrupt(s)
2387 *
Komal Seelam644263d2016-02-22 20:45:49 +05302388 * @scn: struct hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002389 *
2390 * Return: none
2391 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002392void hif_pci_nointrs(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002393{
Houston Hoffman15010772016-09-16 14:01:13 -07002394 int i, ret;
Komal Seelam02cf2f82016-02-22 20:44:25 +05302395 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2396 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002397
Kiran Venkatappaa17e5e52016-12-20 11:32:06 +05302398 ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2399
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002400 if (scn->request_irq_done == false)
2401 return;
Houston Hoffman15010772016-09-16 14:01:13 -07002402
Houston Hoffman648a9182017-05-21 23:27:50 -07002403 hif_pci_deconfigure_grp_irq(scn);
2404
Houston Hoffman15010772016-09-16 14:01:13 -07002405 ret = hif_ce_srng_msi_free_irq(scn);
Yun Parkb5e74bb2017-09-19 20:13:48 -07002406 if (ret != -EINVAL) {
2407 /* ce irqs freed in hif_ce_srng_msi_free_irq */
2408
2409 if (scn->wake_irq)
2410 free_irq(scn->wake_irq, scn);
2411 scn->wake_irq = 0;
2412 } else if (sc->num_msi_intrs > 0) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002413 /* MSI interrupt(s) */
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002414 for (i = 0; i < sc->num_msi_intrs; i++)
Houston Hoffman3db96a42016-05-05 19:54:39 -07002415 free_irq(sc->irq + i, sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002416 sc->num_msi_intrs = 0;
2417 } else {
Houston Hoffman3db96a42016-05-05 19:54:39 -07002418 /* Legacy PCI line interrupt
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002419		 * Use sc->irq instead of sc->pdev->irq
2420 * platform_device pdev doesn't have an irq field
2421 */
Houston Hoffman3db96a42016-05-05 19:54:39 -07002422 free_irq(sc->irq, sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002423 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002424 scn->request_irq_done = false;
2425}
2426
2427/**
2428 * hif_pci_disable_bus(): disable the pci bus
2429 *
2430 * This function disables the bus
2431 *
2432 * @scn: hif context
2433 *
2434 * Return: none
2435 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07002436void hif_pci_disable_bus(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002437{
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302438 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Himanshu Agarwal2a924592016-06-30 18:04:14 +05302439 struct pci_dev *pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002440 void __iomem *mem;
Venkateswara Swamy Bandarufa291a72016-07-28 18:55:23 +05302441 struct hif_target_info *tgt_info = &scn->target_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002442
2443 /* Attach did not succeed, all resources have been
2444 * freed in error handler
2445 */
2446 if (!sc)
2447 return;
2448
Himanshu Agarwal2a924592016-06-30 18:04:14 +05302449 pdev = sc->pdev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002450 if (ADRASTEA_BU) {
Houston Hoffmanc1064a82016-07-25 13:22:25 -07002451 hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2452
Nirav Shahf1e3fb52018-06-12 14:39:34 +05302453 hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2454 hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002455 HOST_GROUP0_MASK);
2456 }
2457
Venkateswara Swamy Bandarufa291a72016-07-28 18:55:23 +05302458#if defined(CPU_WARM_RESET_WAR)
2459 /* Currently CPU warm reset sequence is tested only for AR9888_REV2
2460 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2461 * verified for AR9888_REV1
2462 */
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002463 if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2464 (tgt_info->target_version == AR9887_REV1_VERSION))
Venkateswara Swamy Bandarufa291a72016-07-28 18:55:23 +05302465 hif_pci_device_warm_reset(sc);
2466 else
2467 hif_pci_device_reset(sc);
2468#else
Houston Hoffmanf241eb02016-05-10 17:07:36 -07002469 hif_pci_device_reset(sc);
Venkateswara Swamy Bandarufa291a72016-07-28 18:55:23 +05302470#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002471 mem = (void __iomem *)sc->mem;
2472 if (mem) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002473 hif_dump_pipe_debug_count(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002474 if (scn->athdiag_procfs_inited) {
2475 athdiag_procfs_remove();
2476 scn->athdiag_procfs_inited = false;
2477 }
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05302478 sc->hif_pci_deinit(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002479 scn->mem = NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002480 }
2481 HIF_INFO("%s: X", __func__);
2482}
2483
2484#define OL_ATH_PCI_PM_CONTROL 0x44
2485
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002486#ifdef FEATURE_RUNTIME_PM
Houston Hoffmancceec342015-11-11 11:37:20 -08002487/**
Jeff Johnson1002ca52018-05-12 11:29:24 -07002488 * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
Houston Hoffmancceec342015-11-11 11:37:20 -08002489 * @scn: hif context
2490 * @flag: prevent linkdown if true otherwise allow
2491 *
2492 * this api should only be called as part of bus prevent linkdown
2493 */
Komal Seelam644263d2016-02-22 20:45:49 +05302494static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
Houston Hoffmancceec342015-11-11 11:37:20 -08002495{
Komal Seelam644263d2016-02-22 20:45:49 +05302496 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffmancceec342015-11-11 11:37:20 -08002497
2498 if (flag)
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08002499 qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
Houston Hoffmancceec342015-11-11 11:37:20 -08002500 else
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08002501 qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
Houston Hoffmancceec342015-11-11 11:37:20 -08002502}
2503#else
Komal Seelam644263d2016-02-22 20:45:49 +05302504static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
Houston Hoffmancceec342015-11-11 11:37:20 -08002505{
2506}
2507#endif
2508
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002509#if defined(CONFIG_PCI_MSM)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002510/**
2511 * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2512 * @flag: true prevents linkdown, false allows
2513 *
2514 * Calls into the platform driver to vote against taking down the
2515 * pcie link.
2516 *
2517 * Return: n/a
2518 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002519void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002520{
Dustin Brown0d7163d2017-09-07 14:24:30 -07002521 int errno;
2522
Rajeev Kumard60134e2018-11-28 14:50:03 -08002523 HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
Houston Hoffmancceec342015-11-11 11:37:20 -08002524 hif_runtime_prevent_linkdown(scn, flag);
Dustin Brown0d7163d2017-09-07 14:24:30 -07002525
2526 errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2527 if (errno)
2528 HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2529 __func__, errno);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002530}
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002531#else
Houston Hoffman4849fcc2016-05-05 15:42:35 -07002532void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002533{
Rajeev Kumard60134e2018-11-28 14:50:03 -08002534 HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
Ryan Hsu0f6d3302016-01-21 16:21:17 -08002535 hif_runtime_prevent_linkdown(scn, flag);
2536}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002537#endif
2538
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002539/**
Dustin Brown782a07e2016-12-07 14:14:24 -08002540 * hif_pci_bus_suspend(): prepare hif for suspend
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002541 *
Dustin Brownda4fb2a2018-11-19 12:08:38 -08002542 * Return: Errno
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002543 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002544int hif_pci_bus_suspend(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002545{
Dustin Brown13835b12019-03-25 13:34:35 -07002546 hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2547
2548 if (hif_drain_tasklets(scn)) {
2549 hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2550 return -EBUSY;
2551 }
2552
Kai Liuf26fa0e2019-03-14 21:08:43 +08002553 /* Stop the HIF Sleep Timer */
2554 hif_cancel_deferred_target_sleep(scn);
Dustin Brown13835b12019-03-25 13:34:35 -07002555
Dustin Brownda4fb2a2018-11-19 12:08:38 -08002556 return 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002557}
2558
2559/**
Komal Seelam2510b582016-08-22 18:43:28 +05302560 * __hif_check_link_status() - API to check if PCIe link is active/not
2561 * @scn: HIF Context
2562 *
2563 * API reads the PCIe config space to verify if PCIe link training is
2564 * successful or not.
2565 *
2566 * Return: Success/Failure
2567 */
2568static int __hif_check_link_status(struct hif_softc *scn)
2569{
jiad8245c032018-01-03 12:35:39 +08002570 uint16_t dev_id = 0;
Komal Seelam2510b582016-08-22 18:43:28 +05302571 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2572 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2573
2574 if (!sc) {
2575 HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2576 return -EINVAL;
2577 }
2578
2579 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2580
2581 if (dev_id == sc->devid)
2582 return 0;
2583
2584 HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2585 __func__, dev_id);
2586
2587 scn->recovery = true;
2588
2589 if (cbk && cbk->set_recovery_in_progress)
2590 cbk->set_recovery_in_progress(cbk->context, true);
2591 else
2592 HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2593
2594 pld_is_pci_link_down(sc->dev);
2595 return -EACCES;
2596}
2597
Dustin Brown782a07e2016-12-07 14:14:24 -08002598/**
2599 * hif_pci_bus_resume(): prepare hif for resume
2600 *
Dustin Brownda4fb2a2018-11-19 12:08:38 -08002601 * Return: Errno
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002602 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002603int hif_pci_bus_resume(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002604{
Dustin Brown13835b12019-03-25 13:34:35 -07002605 int errno;
2606
2607 errno = __hif_check_link_status(scn);
2608 if (errno)
2609 return errno;
2610
2611 hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2612
2613 return 0;
Dustin Brown782a07e2016-12-07 14:14:24 -08002614}
2615
2616/**
2617 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2618 * @scn: hif context
2619 *
Jeff Johnson2d821eb2018-05-06 16:25:49 -07002620 * Ensure that if the wakeup message was received before the irq
Dustin Brown782a07e2016-12-07 14:14:24 -08002621 * was disabled, the message is processed before suspending.
2622 *
2623 * Return: 0
2624 */
2625int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2626{
Dustin Brown782a07e2016-12-07 14:14:24 -08002627 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2628 qdf_atomic_set(&scn->link_suspended, 1);
2629
Dustin Brown13835b12019-03-25 13:34:35 -07002630 hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2631
Dustin Brown782a07e2016-12-07 14:14:24 -08002632 return 0;
2633}
2634
2635/**
2636 * hif_pci_bus_resume_noirq() - restore normal irq handling after resume
2637 * @scn: hif context
2638 *
Jeff Johnson2d821eb2018-05-06 16:25:49 -07002639 * Disable the wake irq and clear the link-suspended flag so that normal
Dustin Brown782a07e2016-12-07 14:14:24 -08002640 * interrupt processing can resume after the bus comes back up.
2641 *
2642 * Return: 0
2643 */
2644int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2645{
Dustin Brown13835b12019-03-25 13:34:35 -07002646 hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2647
Dustin Brown782a07e2016-12-07 14:14:24 -08002648 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2649 qdf_atomic_set(&scn->link_suspended, 0);
2650
2651 return 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002652}
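/*
 * Ordering sketch (illustrative): during a system suspend/resume cycle the
 * bus layer is expected to invoke the hooks above in roughly this order;
 * error handling is omitted for brevity.
 *
 *	// system suspend path
 *	hif_pci_bus_suspend(scn);        // disable irqs, drain tasklets
 *	hif_pci_bus_suspend_noirq(scn);  // mark link suspended, arm wake irq
 *
 *	// system resume path
 *	hif_pci_bus_resume_noirq(scn);   // disarm wake irq, clear suspended flag
 *	hif_pci_bus_resume(scn);         // verify link, re-enable irqs
 */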
2653
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002654#ifdef FEATURE_RUNTIME_PM
2655/**
2656 * __hif_runtime_pm_set_state(): utility function to set the runtime pm state
2657 * @state: state to set
2658 *
2659 * Atomically updates the hif pci runtime pm state.
2660 */
Komal Seelam644263d2016-02-22 20:45:49 +05302661static void __hif_runtime_pm_set_state(struct hif_softc *scn,
Komal Seelamf8600682016-02-02 18:17:13 +05302662 enum hif_pm_runtime_state state)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002663{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002664 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002665
Jeff Johnson8d639a02019-03-18 09:51:11 -07002666 if (!sc) {
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002667 HIF_ERROR("%s: HIF_CTX not initialized",
2668 __func__);
2669 return;
2670 }
2671
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302672 qdf_atomic_set(&sc->pm_state, state);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002673}
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002674
2675/**
2676 * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2677 *
2678 * Notify hif that a runtime pm operation has started
2679 */
Komal Seelam644263d2016-02-22 20:45:49 +05302680static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002681{
Komal Seelamf8600682016-02-02 18:17:13 +05302682 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002683}
2684
2685/**
2686 * hif_runtime_pm_set_state_on(): adjust runtime pm state
2687 *
2688 * Notify hif that the runtime pm state should be on
2689 */
Komal Seelam644263d2016-02-22 20:45:49 +05302690static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002691{
Komal Seelamf8600682016-02-02 18:17:13 +05302692 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002693}
2694
2695/**
2696 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
2697 *
2698 * Notify hif that a runtime suspend attempt has been completed successfully
2699 */
Komal Seelam644263d2016-02-22 20:45:49 +05302700static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002701{
Komal Seelamf8600682016-02-02 18:17:13 +05302702 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08002703}
2704
Houston Hoffman692cc052015-11-10 18:42:47 -08002705/**
2706 * hif_log_runtime_suspend_success() - log a successful runtime suspend
2707 */
Komal Seelam644263d2016-02-22 20:45:49 +05302708static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002709{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002710 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002711
Jeff Johnson8d639a02019-03-18 09:51:11 -07002712 if (!sc)
Houston Hoffman692cc052015-11-10 18:42:47 -08002713 return;
2714
2715 sc->pm_stats.suspended++;
2716 sc->pm_stats.suspend_jiffies = jiffies;
2717}
2718
2719/**
2720 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2721 *
2722 * log a failed runtime suspend
2723 * (the caller marks last busy to prevent an immediate runtime suspend)
2724 */
Komal Seelamf8600682016-02-02 18:17:13 +05302725static void hif_log_runtime_suspend_failure(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002726{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002727 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002728
Jeff Johnson8d639a02019-03-18 09:51:11 -07002729 if (!sc)
Houston Hoffman692cc052015-11-10 18:42:47 -08002730 return;
2731
2732 sc->pm_stats.suspend_err++;
Houston Hoffman692cc052015-11-10 18:42:47 -08002733}
2734
2735/**
2736 * hif_log_runtime_resume_success() - log a successful runtime resume
2737 *
Jeff Johnson1002ca52018-05-12 11:29:24 -07002738 * log a successful runtime resume
Houston Hoffman692cc052015-11-10 18:42:47 -08002739 * (the caller marks last busy to prevent an immediate runtime suspend)
2740 */
Komal Seelamf8600682016-02-02 18:17:13 +05302741static void hif_log_runtime_resume_success(void *hif_ctx)
Houston Hoffman692cc052015-11-10 18:42:47 -08002742{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002743 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07002744
Jeff Johnson8d639a02019-03-18 09:51:11 -07002745 if (!sc)
Houston Hoffman692cc052015-11-10 18:42:47 -08002746 return;
2747
2748 sc->pm_stats.resumed++;
Houston Hoffman78467a82016-01-05 20:08:56 -08002749}
2750
2751/**
2752 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2753 *
2754 * Record the failure.
2755 * mark last busy to delay a retry.
2756 * adjust the runtime_pm state.
2757 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302758void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002759{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002760 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002761
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002762 hif_log_runtime_suspend_failure(hif_ctx);
Yue Maac6b2752019-05-08 17:17:12 -07002763 hif_pm_runtime_mark_last_busy(hif_ctx);
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002764 hif_runtime_pm_set_state_on(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002765}
2766
2767/**
2768 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2769 *
2770 * Makes sure that the pci link will be taken down by the suspend operation.
2771 * If the hif layer is configured to leave the bus on, runtime suspend will
2772 * not save any power.
2773 *
2774 * Set the runtime suspend state to in progress.
2775 *
2776 * Return: -EINVAL if the bus won't go down, otherwise 0
2777 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302778int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002779{
Komal Seelam644263d2016-02-22 20:45:49 +05302780 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2781
Komal Seelamf8600682016-02-02 18:17:13 +05302782 if (!hif_can_suspend_link(hif_ctx)) {
Houston Hoffman78467a82016-01-05 20:08:56 -08002783 HIF_ERROR("Runtime PM not supported for link up suspend");
2784 return -EINVAL;
2785 }
2786
Komal Seelam644263d2016-02-22 20:45:49 +05302787 hif_runtime_pm_set_state_inprogress(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002788 return 0;
2789}
2790
2791/**
2792 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2793 *
2794 * Record the success.
2795 * adjust the runtime_pm state
2796 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302797void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002798{
Komal Seelam644263d2016-02-22 20:45:49 +05302799 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2800
2801 hif_runtime_pm_set_state_suspended(scn);
Yue Mafab77ad2019-06-25 18:19:47 -07002802 hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);
Komal Seelam644263d2016-02-22 20:45:49 +05302803 hif_log_runtime_suspend_success(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002804}
2805
2806/**
2807 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2808 *
2809 * update the runtime pm state.
2810 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302811void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002812{
Komal Seelam644263d2016-02-22 20:45:49 +05302813 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2814
Yue Mafab77ad2019-06-25 18:19:47 -07002815 hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
Komal Seelam644263d2016-02-22 20:45:49 +05302816 hif_runtime_pm_set_state_inprogress(scn);
Houston Hoffman78467a82016-01-05 20:08:56 -08002817}
2818
2819/**
2820 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2821 *
2822 * record the success.
2823 * adjust the runtime_pm state
2824 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302825void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
Houston Hoffman78467a82016-01-05 20:08:56 -08002826{
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002827 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffman78467a82016-01-05 20:08:56 -08002828
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002829 hif_log_runtime_resume_success(hif_ctx);
Yue Maac6b2752019-05-08 17:17:12 -07002830 hif_pm_runtime_mark_last_busy(hif_ctx);
Houston Hoffmanb21a0532016-03-14 21:12:12 -07002831 hif_runtime_pm_set_state_on(scn);
Houston Hoffman692cc052015-11-10 18:42:47 -08002832}
Houston Hoffman692cc052015-11-10 18:42:47 -08002833
Houston Hoffman1688fba2015-11-10 16:47:27 -08002834/**
2835 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2836 *
2837 * Return: 0 for success and non-zero error code for failure
2838 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302839int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
Houston Hoffman1688fba2015-11-10 16:47:27 -08002840{
Dustin Brown6834d322017-03-20 15:02:48 -07002841 int errno;
Dustin Brown782a07e2016-12-07 14:14:24 -08002842
Dustin Brown6834d322017-03-20 15:02:48 -07002843 errno = hif_bus_suspend(hif_ctx);
2844 if (errno) {
2845 HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2846 return errno;
2847 }
Dustin Brown782a07e2016-12-07 14:14:24 -08002848
Dustin Brown6834d322017-03-20 15:02:48 -07002849 errno = hif_apps_irqs_disable(hif_ctx);
2850 if (errno) {
2851 HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
Dustin Brown782a07e2016-12-07 14:14:24 -08002852 goto bus_resume;
Dustin Brown6834d322017-03-20 15:02:48 -07002853 }
2854
2855 errno = hif_bus_suspend_noirq(hif_ctx);
2856 if (errno) {
2857 HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2858 goto irqs_enable;
2859 }
2860
2861 /* link should always be down; skip enable wake irq */
Dustin Brown782a07e2016-12-07 14:14:24 -08002862
2863 return 0;
2864
Dustin Brown62af8f32017-03-30 10:39:49 -07002865irqs_enable:
Dustin Brown6834d322017-03-20 15:02:48 -07002866 QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2867
2868bus_resume:
2869 QDF_BUG(!hif_bus_resume(hif_ctx));
2870
Dustin Brown62af8f32017-03-30 10:39:49 -07002871 return errno;
Houston Hoffman1688fba2015-11-10 16:47:27 -08002872}
2873
Houston Hoffmanf4607852015-12-17 17:14:40 -08002874/**
2875 * hif_fastpath_resume() - resume fastpath for runtimepm
2876 *
2877 * ensure that the fastpath write index register is up to date
2878 * since runtime pm may cause ce_send_fast to skip the register
2879 * write.
Yun Park3fb36442017-08-17 17:37:53 -07002880 *
2881 * fastpath only applicable to legacy copy engine
Houston Hoffmanf4607852015-12-17 17:14:40 -08002882 */
Yue Ma9c6f84d2017-02-17 10:45:55 -08002883void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
Houston Hoffmanf4607852015-12-17 17:14:40 -08002884{
Komal Seelam644263d2016-02-22 20:45:49 +05302885 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002886 struct CE_state *ce_state;
2887
2888 if (!scn)
2889 return;
2890
2891 if (scn->fastpath_mode_on) {
Houston Hoffmanfaf8ab52016-07-01 04:22:40 -07002892 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2893 return;
Houston Hoffmanf4607852015-12-17 17:14:40 -08002894
Houston Hoffmanfaf8ab52016-07-01 04:22:40 -07002895 ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2896 qdf_spin_lock_bh(&ce_state->ce_index_lock);
2897
2898 /* war_ce_src_ring_write_idx_set */
2899 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2900 ce_state->src_ring->write_index);
2901 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2902 Q_TARGET_ACCESS_END(scn);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002903 }
2904}
Houston Hoffmanf4607852015-12-17 17:14:40 -08002905
Houston Hoffman1688fba2015-11-10 16:47:27 -08002906/**
2907 * hif_runtime_resume() - do the bus resume part of a runtime resume
2908 *
2909 * Return: 0 for success and non-zero error code for failure
2910 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302911int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
Houston Hoffman1688fba2015-11-10 16:47:27 -08002912{
Dustin Brown6834d322017-03-20 15:02:48 -07002913 /* link should always be down; skip disable wake irq */
Dustin Brown782a07e2016-12-07 14:14:24 -08002914
Dustin Brown6834d322017-03-20 15:02:48 -07002915 QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2916 QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2917 QDF_BUG(!hif_bus_resume(hif_ctx));
Dustin Brown782a07e2016-12-07 14:14:24 -08002918 return 0;
Houston Hoffman1688fba2015-11-10 16:47:27 -08002919}
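/*
 * Runtime PM flow sketch (illustrative): the runtime-PM glue is expected to
 * drive the bookkeeping helpers and the bus hooks roughly as below.
 * runtime_suspend_cb()/runtime_resume_cb() are hypothetical callback names;
 * the hif_* callees are the functions defined above.
 *
 *	static int runtime_suspend_cb(struct hif_opaque_softc *hif_ctx)
 *	{
 *		int ret;
 *
 *		ret = hif_pre_runtime_suspend(hif_ctx);   // state -> INPROGRESS
 *		if (ret)
 *			return ret;
 *
 *		ret = hif_runtime_suspend(hif_ctx);       // bus suspend sequence
 *		if (ret) {
 *			hif_process_runtime_suspend_failure(hif_ctx);
 *			return ret;
 *		}
 *
 *		hif_process_runtime_suspend_success(hif_ctx); // state -> SUSPENDED
 *		return 0;
 *	}
 *
 *	static int runtime_resume_cb(struct hif_opaque_softc *hif_ctx)
 *	{
 *		hif_pre_runtime_resume(hif_ctx);          // state -> INPROGRESS
 *		hif_runtime_resume(hif_ctx);              // bus resume sequence
 *		hif_process_runtime_resume_success(hif_ctx); // state -> ON
 *		return 0;
 *	}
 */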
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002920#endif /* #ifdef FEATURE_RUNTIME_PM */
Houston Hoffman1688fba2015-11-10 16:47:27 -08002921
Komal Seelamaa72bb72016-02-01 17:22:50 +05302922#if CONFIG_PCIE_64BIT_MSI
Komal Seelam644263d2016-02-22 20:45:49 +05302923static void hif_free_msi_ctx(struct hif_softc *scn)
Komal Seelamaa72bb72016-02-01 17:22:50 +05302924{
2925 struct hif_pci_softc *sc = scn->hif_sc;
2926 struct hif_msi_info *info = &sc->msi_info;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302927 struct device *dev = scn->qdf_dev->dev;
Komal Seelamaa72bb72016-02-01 17:22:50 +05302928
Vishwajith Upendra3f78aa62016-02-09 17:53:02 +05302929 OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2930 OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
Komal Seelamaa72bb72016-02-01 17:22:50 +05302931 info->magic = NULL;
2932 info->magic_dma = 0;
2933}
2934#else
Komal Seelam644263d2016-02-22 20:45:49 +05302935static void hif_free_msi_ctx(struct hif_softc *scn)
Komal Seelamaa72bb72016-02-01 17:22:50 +05302936{
2937}
2938#endif
2939
Houston Hoffman8f239f62016-03-14 21:12:05 -07002940void hif_pci_disable_isr(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002941{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302942 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002943
Houston Hoffmandef86a32017-04-21 20:23:45 -07002944 hif_exec_kill(&scn->osc);
Komal Seelam644263d2016-02-22 20:45:49 +05302945 hif_nointrs(scn);
Komal Seelamaa72bb72016-02-01 17:22:50 +05302946 hif_free_msi_ctx(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002947 /* Cancel the pending tasklet */
Komal Seelam644263d2016-02-22 20:45:49 +05302948 ce_tasklet_kill(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002949 tasklet_kill(&sc->intr_tq);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302950 qdf_atomic_set(&scn->active_tasklet_cnt, 0);
Venkateswara Swamy Bandaru31108f32016-08-08 18:04:29 +05302951 qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002952}
2953
2954/* Function to reset SoC */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002955void hif_pci_reset_soc(struct hif_softc *hif_sc)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002956{
Houston Hoffman4ca03b62016-03-14 21:11:51 -07002957 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2958 struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
Komal Seelam644263d2016-02-22 20:45:49 +05302959 struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002960
2961#if defined(CPU_WARM_RESET_WAR)
2962 /* Currently CPU warm reset sequence is tested only for AR9888_REV2
2963 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2964 * verified for AR9888_REV1
2965 */
Komal Seelam91553ce2016-01-27 18:57:10 +05302966 if (tgt_info->target_version == AR9888_REV2_VERSION)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002967 hif_pci_device_warm_reset(sc);
Komal Seelam91553ce2016-01-27 18:57:10 +05302968 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002969 hif_pci_device_reset(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002970#else
2971 hif_pci_device_reset(sc);
2972#endif
2973}
2974
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002975#ifdef CONFIG_PCI_MSM
2976static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2977{
2978 msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2979 msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2980}
2981#else
2982static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
2983#endif
2984
Komal Seelambd7c51d2016-02-24 10:27:30 +05302985/**
2986 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2987 * @sc: HIF PCIe Context
2988 *
2989 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2990 *
2991 * Return: -EACCES to indicate failure to the caller
2992 */
2993static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2994{
jiad8245c032018-01-03 12:35:39 +08002995 uint16_t val = 0;
2996 uint32_t bar = 0;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302997 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2998 struct hif_softc *scn = HIF_GET_SOFTC(sc);
2999 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3000 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
Komal Seelam75080122016-03-02 15:18:25 +05303001 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303002 A_target_id_t pci_addr = scn->mem;
3003
3004 HIF_ERROR("%s: keep_awake_count = %d",
3005 __func__, hif_state->keep_awake_count);
3006
3007 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3008
3009 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3010
3011 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3012
3013 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3014
3015 pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3016
3017 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3018
3019 pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3020
3021 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3022
3023 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3024
3025 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3026
3027 HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303028 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
Komal Seelambd7c51d2016-02-24 10:27:30 +05303029 PCIE_SOC_WAKE_ADDRESS));
3030
3031 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303032 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
Komal Seelambd7c51d2016-02-24 10:27:30 +05303033 RTC_STATE_ADDRESS));
3034
3035 HIF_ERROR("%s:error, wakeup target", __func__);
3036 hif_msm_pcie_debug_info(sc);
3037
3038 if (!cfg->enable_self_recovery)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303039 QDF_BUG(0);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303040
3041 scn->recovery = true;
3042
3043 if (cbk->set_recovery_in_progress)
3044 cbk->set_recovery_in_progress(cbk->context, true);
3045
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003046 pld_is_pci_link_down(sc->dev);
Komal Seelambd7c51d2016-02-24 10:27:30 +05303047 return -EACCES;
3048}
3049
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003050/*
3051 * For now, we use simple on-demand sleep/wake.
3052 * Some possible improvements:
3053 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3054 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3055 * Careful, though, these functions may be used by
3056 * interrupt handlers ("atomic")
3057 * -Don't use host_reg_table for this code; instead use values directly
3058 * -Use a separate timer to track activity and allow Target to sleep only
3059 * if it hasn't done anything for a while; may even want to delay some
3060 * processing for a short while in order to "batch" (e.g.) transmit
3061 * requests with completion processing into "windows of up time". Costs
3062 * some performance, but improves power utilization.
3063 * -On some platforms, it might be possible to eliminate explicit
3064 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
3065 * recover from the failure by forcing the Target awake.
3066 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
3067 * overhead in some cases. Perhaps this makes more sense when
3068 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3069 * disabled.
3070 * -It is possible to compile this code out and simply force the Target
3071 * to remain awake. That would yield optimal performance at the cost of
3072 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3073 *
3074 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3075 */
3076/**
3077 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
Komal Seelam644263d2016-02-22 20:45:49 +05303078 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003079 * @sleep_ok: bool
3080 * @wait_for_it: bool
3081 *
3082 * Allow the target to sleep or force it awake, on demand.
3083 *
3084 * Return: 0 on success, negative errno on failure
3085 */
Houston Hoffman4ca03b62016-03-14 21:11:51 -07003086int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003087 bool sleep_ok, bool wait_for_it)
3088{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303089 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003090 A_target_id_t pci_addr = scn->mem;
3091 static int max_delay;
Komal Seelam02cf2f82016-02-22 20:44:25 +05303092 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003093 static int debug;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003094 if (scn->recovery)
3095 return -EACCES;
3096
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303097 if (qdf_atomic_read(&scn->link_suspended)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003098 HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3099 debug = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303100 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003101 return -EACCES;
3102 }
3103
3104 if (debug) {
3105 wait_for_it = true;
3106 HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3107 __func__);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303108 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003109 }
3110
3111 if (sleep_ok) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303112 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003113 hif_state->keep_awake_count--;
3114 if (hif_state->keep_awake_count == 0) {
3115 /* Allow sleep */
3116 hif_state->verified_awake = false;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303117 hif_state->sleep_ticks = qdf_system_ticks();
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003118 }
3119 if (hif_state->fake_sleep == false) {
3120 /* Set the Fake Sleep */
3121 hif_state->fake_sleep = true;
3122
3123 /* Start the Sleep Timer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303124 qdf_timer_stop(&hif_state->sleep_timer);
3125 qdf_timer_start(&hif_state->sleep_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003126 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3127 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303128 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003129 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303130 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003131
3132 if (hif_state->fake_sleep) {
3133 hif_state->verified_awake = true;
3134 } else {
3135 if (hif_state->keep_awake_count == 0) {
3136 /* Force AWAKE */
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303137 hif_write32_mb(sc, pci_addr +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003138 PCIE_LOCAL_BASE_ADDRESS +
3139 PCIE_SOC_WAKE_ADDRESS,
3140 PCIE_SOC_WAKE_V_MASK);
3141 }
3142 }
3143 hif_state->keep_awake_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303144 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003145
3146 if (wait_for_it && !hif_state->verified_awake) {
3147#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */
3148 int tot_delay = 0;
3149 int curr_delay = 5;
3150
3151 for (;; ) {
3152 if (hif_targ_is_awake(scn, pci_addr)) {
3153 hif_state->verified_awake = true;
3154 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003155 }
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07003156 if (!hif_pci_targ_is_present(scn, pci_addr))
3157 break;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303158 if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3159 return hif_log_soc_wakeup_timeout(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003160
3161 OS_DELAY(curr_delay);
3162 tot_delay += curr_delay;
3163
3164 if (curr_delay < 50)
3165 curr_delay += 5;
3166 }
3167
3168 /*
3169 * NB: If Target has to come out of Deep Sleep,
3170 * this may take a few Msecs. Typically, though
3171 * this delay should be <30us.
3172 */
3173 if (tot_delay > max_delay)
3174 max_delay = tot_delay;
3175 }
3176 }
3177
3178 if (debug && hif_state->verified_awake) {
3179 debug = 0;
3180 HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3181 __func__,
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303182 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003183 PCIE_INTR_ENABLE_ADDRESS),
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303184 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003185 PCIE_INTR_CAUSE_ADDRESS),
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303186 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003187 CPU_INTR_ADDRESS),
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303188 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003189 PCIE_INTR_CLR_ADDRESS),
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303190 hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003191 CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3192 }
3193
3194 return 0;
3195}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003196
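/*
 * Keep-awake usage sketch (illustrative): register accesses outside the
 * interrupt path are normally bracketed by Q_TARGET_ACCESS_BEGIN()/
 * Q_TARGET_ACCESS_END(), which typically funnel into
 * hif_pci_target_sleep_state_adjust() to wake the target and later allow it
 * to sleep again. read_some_target_reg() and REG_OFFSET are hypothetical.
 *
 *	static uint32_t read_some_target_reg(struct hif_softc *scn)
 *	{
 *		uint32_t val = 0;
 *
 *		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)   // wake, keep_awake_count++
 *			return 0;
 *		val = hif_read32_mb(scn, scn->mem + REG_OFFSET);
 *		Q_TARGET_ACCESS_END(scn);             // keep_awake_count--, may sleep
 *		return val;
 *	}
 */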
3197#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303198uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003199{
3200 uint32_t value;
3201 void *addr;
3202
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003203 addr = scn->mem + offset;
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303204 value = hif_read32_mb(scn, addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003205
3206 {
3207 unsigned long irq_flags;
3208 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3209
3210 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3211 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3212 pcie_access_log[idx].is_write = false;
3213 pcie_access_log[idx].addr = addr;
3214 pcie_access_log[idx].value = value;
3215 pcie_access_log_seqnum++;
3216 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3217 }
3218
3219 return value;
3220}
3221
3222void
Komal Seelam644263d2016-02-22 20:45:49 +05303223hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003224{
3225 void *addr;
3226
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003227 addr = scn->mem + (offset);
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303228 hif_write32_mb(scn, addr, value);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003229
3230 {
3231 unsigned long irq_flags;
3232 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3233
3234 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3235 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3236 pcie_access_log[idx].is_write = true;
3237 pcie_access_log[idx].addr = addr;
3238 pcie_access_log[idx].value = value;
3239 pcie_access_log_seqnum++;
3240 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3241 }
3242}
3243
3244/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003245 * hif_target_dump_access_log() - dump access log
3246 *
3247 * dump access log
3248 *
3249 * Return: n/a
3250 */
3251void hif_target_dump_access_log(void)
3252{
3253 int idx, len, start_idx, cur_idx;
3254 unsigned long irq_flags;
3255
3256 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3257 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3258 len = PCIE_ACCESS_LOG_NUM;
3259 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3260 } else {
3261 len = pcie_access_log_seqnum;
3262 start_idx = 0;
3263 }
3264
3265 for (idx = 0; idx < len; idx++) {
3266 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
Jeff Johnsonb9450212017-09-18 10:12:38 -07003267 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003268 __func__, idx,
3269 pcie_access_log[cur_idx].seqnum,
3270 pcie_access_log[cur_idx].is_write,
3271 pcie_access_log[cur_idx].addr,
3272 pcie_access_log[cur_idx].value);
3273 }
3274
3275 pcie_access_log_seqnum = 0;
3276 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3277}
3278#endif
3279
Houston Hoffman3db96a42016-05-05 19:54:39 -07003280#ifndef HIF_AHB
3281int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3282{
3283 QDF_BUG(0);
3284 return -EINVAL;
3285}
Houston Hoffman471e9a02016-08-23 11:40:19 -07003286
3287int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3288{
3289 QDF_BUG(0);
3290 return -EINVAL;
3291}
Houston Hoffman3db96a42016-05-05 19:54:39 -07003292#endif
3293
Jeff Johnsonbf8ed0a2016-12-17 16:31:03 -08003294static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
Houston Hoffman15010772016-09-16 14:01:13 -07003295{
3296 struct ce_tasklet_entry *tasklet_entry = context;
3297 return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3298}
3299extern const char *ce_name[];
3300
Dustin Brown6834d322017-03-20 15:02:48 -07003301static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3302{
3303 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3304
3305 return pci_scn->ce_msi_irq_num[ce_id];
3306}
3307
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003308/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3309 * @hif_sc: hif context
3310 * @ce_id: which ce to disable copy complete interrupts for
3311 *
3312 * since MSI interrupts are not level based, the system can function
3313 * without disabling these interrupts. Interrupt mitigation can be
3314 * added here for better system performance.
3315 */
3316static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003317{
Dustin Brown6834d322017-03-20 15:02:48 -07003318 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003319}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003320
3321static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003322{
Dustin Brown6834d322017-03-20 15:02:48 -07003323 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003324}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003325
3326static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
Nirav Shah59b6c452018-08-09 18:12:23 +05303327{
3328 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3329}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003330
3331static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
Nirav Shah59b6c452018-08-09 18:12:23 +05303332{
3333 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3334}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003335
3336static int hif_ce_msi_configure_irq(struct hif_softc *scn)
Houston Hoffman15010772016-09-16 14:01:13 -07003337{
3338 int ret;
3339 int ce_id, irq;
3340 uint32_t msi_data_start;
3341 uint32_t msi_data_count;
3342 uint32_t msi_irq_start;
3343 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003344 struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
Houston Hoffman15010772016-09-16 14:01:13 -07003345
Dustin Brown2af3d672017-05-30 16:14:01 -07003346 /* do wake irq assignment */
3347 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3348 &msi_data_count, &msi_data_start,
3349 &msi_irq_start);
3350 if (ret)
3351 return ret;
3352
3353 scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
Yue Mae933eff2019-06-05 12:51:32 -07003354 ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler,
3355 IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
Dustin Brown2af3d672017-05-30 16:14:01 -07003356 if (ret)
3357 return ret;
3358
3359 /* do ce irq assignments */
Houston Hoffman15010772016-09-16 14:01:13 -07003360 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3361 &msi_data_count, &msi_data_start,
3362 &msi_irq_start);
Houston Hoffman15010772016-09-16 14:01:13 -07003363 if (ret)
Dustin Brown2af3d672017-05-30 16:14:01 -07003364 goto free_wake_irq;
Houston Hoffman15010772016-09-16 14:01:13 -07003365
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003366 if (ce_srng_based(scn)) {
Dustin Brown6834d322017-03-20 15:02:48 -07003367 scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3368 scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003369 } else {
Dustin Brown6834d322017-03-20 15:02:48 -07003370 scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3371 scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003372 }
3373
Dustin Brown6834d322017-03-20 15:02:48 -07003374 scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3375
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003376 /* needs to match the ce_id -> irq data mapping
3377 * used in the srng parameter configuration
3378 */
3379 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3380 unsigned int msi_data = (ce_id % msi_data_count) +
3381 msi_irq_start;
3382 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
Jeff Johnsonb9450212017-09-18 10:12:38 -07003383 HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003384 __func__, ce_id, msi_data, irq,
3385 &ce_sc->tasklets[ce_id]);
3386
Houston Hoffman648a9182017-05-21 23:27:50 -07003387 /* implies the ce is also initialized */
3388 if (!ce_sc->tasklets[ce_id].inited)
3389 continue;
3390
Houston Hoffman5caa32f2016-12-21 14:11:38 -08003391 pci_sc->ce_msi_irq_num[ce_id] = irq;
3392 ret = request_irq(irq, hif_ce_interrupt_handler,
3393 IRQF_SHARED,
3394 ce_name[ce_id],
3395 &ce_sc->tasklets[ce_id]);
3396 if (ret)
3397 goto free_irq;
3398 }
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003399
Houston Hoffman15010772016-09-16 14:01:13 -07003400 return ret;
3401
3402free_irq:
3403 /* the request_irq for the last ce_id failed so skip it. */
3404 while (ce_id > 0 && ce_id < scn->ce_count) {
3405 unsigned int msi_data;
3406
3407 ce_id--;
3408 msi_data = (ce_id % msi_data_count) + msi_data_start;
3409 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3410 free_irq(irq, &ce_sc->tasklets[ce_id]);
3411 }
Dustin Brown2af3d672017-05-30 16:14:01 -07003412
3413free_wake_irq:
3414 free_irq(scn->wake_irq, scn->qdf_dev->dev);
3415 scn->wake_irq = 0;
3416
Houston Hoffman15010772016-09-16 14:01:13 -07003417 return ret;
3418}
3419
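/*
 * MSI mapping sketch (illustrative): with a block of msi_data_count CE
 * vectors starting at msi_irq_start, each copy engine is hashed onto a
 * vector and then translated to a Linux irq number, mirroring the loop in
 * hif_ce_msi_configure_irq() above. The numbers below are only an example.
 *
 *	// e.g. msi_data_count = 8, msi_irq_start = 1
 *	unsigned int msi_data = (ce_id % msi_data_count) + msi_irq_start;
 *	int irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
 *	// ce_id 0 -> msi_data 1, ce_id 7 -> msi_data 8, ce_id 8 -> msi_data 1
 */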
Houston Hoffman648a9182017-05-21 23:27:50 -07003420static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3421{
3422 int i;
3423
3424 for (i = 0; i < hif_ext_group->numirq; i++)
3425 disable_irq_nosync(hif_ext_group->os_irq[i]);
3426}
3427
3428static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3429{
3430 int i;
3431
3432 for (i = 0; i < hif_ext_group->numirq; i++)
3433 enable_irq(hif_ext_group->os_irq[i]);
3434}
3435
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05303436/**
3437 * hif_pci_get_irq_name() - get the irq name
3438 * This function maps an irq number to a printable
3439 * irq name.
3440 *
3441 * @irq_no: irq number
3442 *
3443 * Return: irq name
3444 */
3445const char *hif_pci_get_irq_name(int irq_no)
3446{
3447 return "pci-dummy";
3448}
Houston Hoffman648a9182017-05-21 23:27:50 -07003449
3450int hif_pci_configure_grp_irq(struct hif_softc *scn,
3451 struct hif_exec_context *hif_ext_group)
3452{
3453 int ret = 0;
3454 int irq = 0;
3455 int j;
3456
3457 hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3458 hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05303459 hif_ext_group->irq_name = &hif_pci_get_irq_name;
Houston Hoffman648a9182017-05-21 23:27:50 -07003460 hif_ext_group->work_complete = &hif_dummy_grp_done;
3461
3462 for (j = 0; j < hif_ext_group->numirq; j++) {
3463 irq = hif_ext_group->irq[j];
3464
3465 HIF_DBG("%s: request_irq = %d for grp %d",
3466 __func__, irq, hif_ext_group->grp_id);
3467 ret = request_irq(irq,
3468 hif_ext_group_interrupt_handler,
3469 IRQF_SHARED, "wlan_EXT_GRP",
3470 hif_ext_group);
3471 if (ret) {
3472 HIF_ERROR("%s: request_irq failed ret = %d",
3473 __func__, ret);
3474 return -EFAULT;
3475 }
3476 hif_ext_group->os_irq[j] = irq;
3477 }
3478 hif_ext_group->irq_requested = true;
3479 return 0;
3480}
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003481
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003482/**
Houston Hoffman3db96a42016-05-05 19:54:39 -07003483 * hif_configure_irq() - configure interrupt
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003484 *
3485 * This function configures interrupt(s)
3486 *
3487 * @scn: hif context
3488 * Tries MSI interrupts first, then falls back to legacy or AHB irqs per target type.
3489 *
3490 * Return: 0 - for success
3491 */
Komal Seelam644263d2016-02-22 20:45:49 +05303492int hif_configure_irq(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003493{
3494 int ret = 0;
Komal Seelam644263d2016-02-22 20:45:49 +05303495 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003496
3497 HIF_TRACE("%s: E", __func__);
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05303498
3499 if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
Balamurugan Mahalingam3ab36332018-01-29 19:15:02 +05303500 scn->request_irq_done = false;
3501 return 0;
3502 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003503
Komal Seelamaa72bb72016-02-01 17:22:50 +05303504 hif_init_reschedule_tasklet_work(sc);
3505
Houston Hoffman3aa074f2016-11-23 11:53:25 -08003506 ret = hif_ce_msi_configure_irq(scn);
Houston Hoffman15010772016-09-16 14:01:13 -07003507 if (ret == 0) {
3508 goto end;
3509 }
3510
Houston Hoffman3db96a42016-05-05 19:54:39 -07003511 switch (scn->target_info.target_type) {
3512 case TARGET_TYPE_IPQ4019:
3513 ret = hif_ahb_configure_legacy_irq(sc);
3514 break;
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303515 case TARGET_TYPE_QCA8074:
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05303516 case TARGET_TYPE_QCA8074V2:
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05303517 case TARGET_TYPE_QCA6018:
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303518 ret = hif_ahb_configure_irq(sc);
3519 break;
Houston Hoffman3db96a42016-05-05 19:54:39 -07003520 default:
3521 ret = hif_pci_configure_legacy_irq(sc);
3522 break;
3523 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003524 if (ret < 0) {
3525 HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3526 __func__, ret);
3527 return ret;
3528 }
3529end:
3530 scn->request_irq_done = true;
3531 return 0;
3532}
3533
3534/**
Sathyanarayanan Esakkiappanfa1ddd52018-09-06 12:43:36 +05303535 * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3536 * @scn: hif control structure
3537 *
3538 * Sets the IRQ bit in the LF Timer Status Address to wake a peregrine/swift
3539 * target stuck in a polling loop in pcie_address_config in FW
3540 *
3541 * Return: none
3542 */
3543static void hif_trigger_timer_irq(struct hif_softc *scn)
3544{
3545 int tmp;
3546 /* Trigger IRQ on Peregrine/Swift by setting
3547 * IRQ Bit of LF_TIMER 0
3548 */
3549 tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3550 SOC_LF_TIMER_STATUS0_ADDRESS));
3551 /* Set Raw IRQ Bit */
3552 tmp |= 1;
3553 /* SOC_LF_TIMER_STATUS0 */
3554 hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3555 SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3556}
3557
3558/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003559 * hif_target_sync() : ensure the target is ready
Jeff Johnson1002ca52018-05-12 11:29:24 -07003560 * @scn: hif control structure
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003561 *
3562 * Informs fw that we plan to use legacy interrupts so that
3563 * it can begin booting. Ensures that the fw finishes booting
3564 * before continuing. Should be called before trying to write
3565 * to the targets other registers for the first time.
3566 *
3567 * Return: none
3568 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07003569static void hif_target_sync(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003570{
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303571 hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
narayan8235eb52018-07-18 12:40:55 +05303572 PCIE_INTR_ENABLE_ADDRESS),
3573 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3574 /* read to flush pcie write */
3575 (void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3576 PCIE_INTR_ENABLE_ADDRESS));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003577
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303578 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003579 PCIE_SOC_WAKE_ADDRESS,
3580 PCIE_SOC_WAKE_V_MASK);
3581 while (!hif_targ_is_awake(scn, scn->mem))
3582 ;
3583
3584 if (HAS_FW_INDICATOR) {
3585 int wait_limit = 500;
3586 int fw_ind = 0;
Sathyanarayanan Esakkiappanfa1ddd52018-09-06 12:43:36 +05303587 int retry_count = 0;
3588 uint32_t target_type = scn->target_info.target_type;
3589fw_retry:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003590 HIF_TRACE("%s: Loop checking FW signal", __func__);
3591 while (1) {
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303592 fw_ind = hif_read32_mb(scn, scn->mem +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003593 FW_INDICATOR_ADDRESS);
3594 if (fw_ind & FW_IND_INITIALIZED)
3595 break;
3596 if (wait_limit-- < 0)
3597 break;
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303598 hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
narayan8235eb52018-07-18 12:40:55 +05303599 PCIE_INTR_ENABLE_ADDRESS),
3600 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3601 /* read to flush pcie write */
3602 (void)hif_read32_mb(scn, scn->mem +
3603 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003604
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303605 qdf_mdelay(10);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003606 }
Sathyanarayanan Esakkiappanfa1ddd52018-09-06 12:43:36 +05303607 if (wait_limit < 0) {
3608 if (target_type == TARGET_TYPE_AR9888 &&
3609 retry_count++ < 2) {
3610 hif_trigger_timer_irq(scn);
3611 wait_limit = 500;
3612 goto fw_retry;
3613 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003614 HIF_TRACE("%s: FW signal timed out",
3615 __func__);
Sathyanarayanan Esakkiappanfa1ddd52018-09-06 12:43:36 +05303616 qdf_assert_always(0);
3617 } else {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003618 HIF_TRACE("%s: Got FW signal, retries = %x",
3619 __func__, 500-wait_limit);
Sathyanarayanan Esakkiappanfa1ddd52018-09-06 12:43:36 +05303620 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003621 }
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303622 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003623 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3624}
3625
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303626static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3627 struct device *dev)
Houston Hoffmand0620a32016-11-09 20:44:56 -08003628{
3629 struct pld_soc_info info;
3630
3631 pld_get_soc_info(dev, &info);
3632 sc->mem = info.v_addr;
3633 sc->ce_sc.ol_sc.mem = info.v_addr;
3634 sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3635}
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303636
3637static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3638 struct device *dev)
Houston Hoffmand0620a32016-11-09 20:44:56 -08003639{}
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303640
Yu Ouyang59364952019-04-22 15:04:54 +08003641static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3642 int device_id)
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303643{
Yu Ouyang59364952019-04-22 15:04:54 +08003644 if (!pld_have_platform_driver_support(sc->dev))
3645 return false;
3646
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303647 switch (device_id) {
3648 case QCA6290_DEVICE_ID:
3649 case QCA6290_EMULATION_DEVICE_ID:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05303650#ifdef QCA_WIFI_QCA6390
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303651 case QCA6390_DEVICE_ID:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05303652#endif
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303653 case AR6320_DEVICE_ID:
Nirav Shah59b6c452018-08-09 18:12:23 +05303654 case QCN7605_DEVICE_ID:
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303655 return true;
3656 }
3657 return false;
3658}
3659
3660static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3661 int device_id)
3662{
Yu Ouyang59364952019-04-22 15:04:54 +08003663 if (hif_is_pld_based_target(sc, device_id)) {
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303664 sc->hif_enable_pci = hif_enable_pci_pld;
3665 sc->hif_pci_deinit = hif_pci_deinit_pld;
3666 sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3667 } else {
3668 sc->hif_enable_pci = hif_enable_pci_nopld;
3669 sc->hif_pci_deinit = hif_pci_deinit_nopld;
3670 sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3671 }
3672}
Houston Hoffmand0620a32016-11-09 20:44:56 -08003673
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303674#ifdef HIF_REG_WINDOW_SUPPORT
3675static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3676 u32 target_type)
3677{
3678 switch (target_type) {
3679 case TARGET_TYPE_QCN7605:
3680 sc->use_register_windowing = true;
3681 qdf_spinlock_create(&sc->register_access_lock);
3682 sc->register_window = 0;
3683 break;
3684 default:
3685 sc->use_register_windowing = false;
3686 }
3687}
3688#else
3689static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3690 u32 target_type)
3691{
3692 sc->use_register_windowing = false;
3693}
3694#endif
3695
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003696/**
3697 * hif_pci_enable_bus(): enable the pci bus
3698 *
3699 * This function enables the bus
3700 *
3701 * @ol_sc: soft_sc struct
3702 * @dev: device pointer
3703 * @bdev: bus dev pointer
3704 * @bid: bus id pointer
3705 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303706 * Return: QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003707 */
Houston Hoffman8f239f62016-03-14 21:12:05 -07003708QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003709 struct device *dev, void *bdev,
Manikandan Mohanbd0ef8a2017-04-10 13:10:21 -07003710 const struct hif_bus_id *bid,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003711 enum hif_enable_type type)
3712{
3713 int ret = 0;
3714 uint32_t hif_type, target_type;
Komal Seelam02cf2f82016-02-22 20:44:25 +05303715 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
Komal Seelam5584a7c2016-02-24 19:22:48 +05303716 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
Lin Baie4599382018-11-15 15:40:11 +08003717 uint16_t revision_id = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003718 int probe_again = 0;
3719 struct pci_dev *pdev = bdev;
Houston Hoffmanf303f912016-03-14 21:11:42 -07003720 const struct pci_device_id *id = (const struct pci_device_id *)bid;
Komal Seelam91553ce2016-01-27 18:57:10 +05303721 struct hif_target_info *tgt_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003722
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003723 if (!ol_sc) {
3724 HIF_ERROR("%s: hif_ctx is NULL", __func__);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303725 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003726 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003727
Komal Seelambd7c51d2016-02-24 10:27:30 +05303728 HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3729 __func__, hif_get_conparam(ol_sc), id->device);
3730
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003731 sc->pdev = pdev;
3732 sc->dev = &pdev->dev;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003733 sc->devid = id->device;
3734 sc->cacheline_sz = dma_get_cache_alignment();
Komal Seelam644263d2016-02-22 20:45:49 +05303735 tgt_info = hif_get_target_info_handle(hif_hdl);
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303736 hif_pci_init_deinit_ops_attach(sc, id->device);
3737 sc->hif_pci_get_soc_info(sc, dev);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003738again:
Balamurugan Mahalingam1b4476e2018-06-25 12:57:44 +05303739 ret = sc->hif_enable_pci(sc, pdev, id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003740 if (ret < 0) {
3741 HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3742 __func__, ret);
3743 goto err_enable_pci;
3744 }
3745 HIF_TRACE("%s: hif_enable_pci done", __func__);
3746
3747 /* Temporary FIX: disable ASPM on peregrine.
3748 * Will be removed after the OTP is programmed
3749 */
Houston Hoffmanfb7d6122016-03-14 21:11:46 -07003750 hif_disable_power_gating(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003751
3752 device_disable_async_suspend(&pdev->dev);
3753 pci_read_config_word(pdev, 0x08, &revision_id);
3754
3755 ret = hif_get_device_type(id->device, revision_id,
3756 &hif_type, &target_type);
3757 if (ret < 0) {
3758 HIF_ERROR("%s: invalid device id/revision_id", __func__);
3759 goto err_tgtstate;
3760 }
3761 HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3762 __func__, hif_type, target_type);
3763
Komal Seelam02cf2f82016-02-22 20:44:25 +05303764 hif_register_tbl_attach(ol_sc, hif_type);
Govind Singh051a8c42016-05-10 12:23:41 +05303765 hif_target_register_tbl_attach(ol_sc, target_type);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003766
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303767 hif_pci_init_reg_windowing_support(sc, target_type);
3768
Houston Hoffman1ef0c772017-08-01 10:47:06 -07003769 tgt_info->target_type = target_type;
3770
3771 if (ce_srng_based(ol_sc)) {
3772 HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
3773 } else {
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003774 ret = hif_pci_probe_tgt_wakeup(sc);
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003775 if (ret < 0) {
3776 HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
3777 __func__, ret);
3778 if (ret == -EAGAIN)
3779 probe_again++;
3780 goto err_tgtstate;
3781 }
3782 HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003783 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003784
Houston Hoffmand0620a32016-11-09 20:44:56 -08003785 if (!ol_sc->mem_pa) {
3786 HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003787 ret = -EIO;
3788 goto err_tgtstate;
3789 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003790
Houston Hoffman1ef0c772017-08-01 10:47:06 -07003791 if (!ce_srng_based(ol_sc)) {
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003792 hif_target_sync(ol_sc);
Houston Hoffmanc1064a82016-07-25 13:22:25 -07003793
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003794 if (ADRASTEA_BU)
3795 hif_vote_link_up(hif_hdl);
3796 }
Houston Hoffmanc1064a82016-07-25 13:22:25 -07003797
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003798 return 0;
3799
3800err_tgtstate:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003801 hif_disable_pci(sc);
3802 sc->pci_enabled = false;
3803 HIF_ERROR("%s: error, hif_disable_pci done", __func__);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303804 return QDF_STATUS_E_ABORTED;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003805
3806err_enable_pci:
3807 if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3808 int delay_time;
3809
3810 HIF_INFO("%s: pci reprobe", __func__);
3811 /* 10, 40, 90, 100, 100, ... */
3812 delay_time = max(100, 10 * (probe_again * probe_again));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303813 qdf_mdelay(delay_time);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003814 goto again;
3815 }
3816 return ret;
3817}
3818
3819/**
Houston Hoffman8f239f62016-03-14 21:12:05 -07003820 * hif_pci_irq_enable() - ce_irq_enable
3821 * @scn: hif_softc
3822 * @ce_id: ce_id
3823 *
3824 * Return: void
3825 */
3826void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3827{
3828 uint32_t tmp = 1 << ce_id;
3829 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3830
3831 qdf_spin_lock_irqsave(&sc->irq_lock);
3832 scn->ce_irq_summary &= ~tmp;
3833 if (scn->ce_irq_summary == 0) {
3834 /* Enable Legacy PCI line interrupts */
3835 if (LEGACY_INTERRUPTS(sc) &&
Komal Seelam6ee55902016-04-11 17:11:07 +05303836 (scn->target_status != TARGET_STATUS_RESET) &&
Houston Hoffman8f239f62016-03-14 21:12:05 -07003837 (!qdf_atomic_read(&scn->link_suspended))) {
3838
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303839 hif_write32_mb(scn, scn->mem +
Houston Hoffman8f239f62016-03-14 21:12:05 -07003840 (SOC_CORE_BASE_ADDRESS |
3841 PCIE_INTR_ENABLE_ADDRESS),
3842 HOST_GROUP0_MASK);
3843
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303844 hif_read32_mb(scn, scn->mem +
Houston Hoffman8f239f62016-03-14 21:12:05 -07003845 (SOC_CORE_BASE_ADDRESS |
3846 PCIE_INTR_ENABLE_ADDRESS));
3847 }
3848 }
3849 if (scn->hif_init_done == true)
3850 Q_TARGET_ACCESS_END(scn);
3851 qdf_spin_unlock_irqrestore(&sc->irq_lock);
3852
3853 /* check for missed firmware crash */
3854 hif_fw_interrupt_handler(0, scn);
3855}
Dustin Brown6834d322017-03-20 15:02:48 -07003856
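/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * shows how the legacy CE interrupt enable/disable pair above is typically
 * used by a bottom half.  ce_service_example() is a hypothetical stand-in
 * for the real copy engine service routine.
 */
#if 0
static void hif_legacy_ce_bh_example(struct hif_softc *scn, int ce_id)
{
	/* keep the target awake while completions are processed */
	hif_pci_irq_disable(scn, ce_id);

	ce_service_example(scn, ce_id);	/* hypothetical service routine */

	/*
	 * clears ce_id from ce_irq_summary; once the summary is empty the
	 * legacy PCI line interrupt is re-enabled and target access ends
	 */
	hif_pci_irq_enable(scn, ce_id);
}
#endif
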
Houston Hoffman8f239f62016-03-14 21:12:05 -07003857/**
3858 * hif_pci_irq_disable() - disable the copy engine interrupt
3859 * @scn: hif context
3860 * @ce_id: copy engine id
3861 *
Yun Park3fb36442017-08-17 17:37:53 -07003862 * Only applicable to the legacy copy engine.
3863 *
Houston Hoffman8f239f62016-03-14 21:12:05 -07003864 * Return: void
3865 */
3866void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3867{
3868 /* For Rome only need to wake up target */
Jeff Johnson1002ca52018-05-12 11:29:24 -07003869 /* target access is maintained until interrupts are re-enabled */
Houston Hoffman8f239f62016-03-14 21:12:05 -07003870 Q_TARGET_ACCESS_BEGIN(scn);
3871}
3872
Houston Hoffman9078a152015-11-02 16:15:02 -08003873#ifdef FEATURE_RUNTIME_PM
Yue Ma2bdc2ba2019-05-09 14:47:55 -07003874int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
3875{
3876 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3877
3878 if (!sc)
3879 return -EINVAL;
3880
3881 return hif_pm_request_resume(sc->dev);
3882}
Houston Hoffmanf4607852015-12-17 17:14:40 -08003883
Yue Maac6b2752019-05-08 17:17:12 -07003884void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
3885{
3886 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3887
3888 if (!sc)
3889 return;
3890
3891 sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
3892 sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
3893
3894 return pm_runtime_mark_last_busy(sc->dev);
3895}
3896
Komal Seelam5584a7c2016-02-24 19:22:48 +05303897void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
Houston Hoffmanf4607852015-12-17 17:14:40 -08003898{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303899 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffmanf4607852015-12-17 17:14:40 -08003900
Jeff Johnson8d639a02019-03-18 09:51:11 -07003901 if (!sc)
Houston Hoffmanf4607852015-12-17 17:14:40 -08003902 return;
3903
3904 sc->pm_stats.runtime_get++;
3905 pm_runtime_get_noresume(sc->dev);
3906}
3907
Houston Hoffman9078a152015-11-02 16:15:02 -08003908/**
3909 * hif_pm_runtime_get() - do a get operation on the device
3910 *
Jeff Johnson1002ca52018-05-12 11:29:24 -07003911 * A get operation will prevent a runtime suspend until a
Houston Hoffman9078a152015-11-02 16:15:02 -08003912 * corresponding put is done. This API should be used when sending
3913 * data.
3914 *
3915 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3916 * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
3917 *
3918 * Return: 0 if the bus is up and a get has been issued,
3919 * otherwise a negative error code (-EAGAIN when only a resume was requested)
3920 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303921int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
Houston Hoffman9078a152015-11-02 16:15:02 -08003922{
Komal Seelam644263d2016-02-22 20:45:49 +05303923 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05303924 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08003925 int ret;
3926 int pm_state;
3927
Jeff Johnson8d639a02019-03-18 09:51:11 -07003928 if (!scn) {
Houston Hoffman9078a152015-11-02 16:15:02 -08003929 HIF_ERROR("%s: Could not do runtime get, scn is null",
3930 __func__);
3931 return -EFAULT;
3932 }
Houston Hoffman9078a152015-11-02 16:15:02 -08003933
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303934 pm_state = qdf_atomic_read(&sc->pm_state);
Houston Hoffman9078a152015-11-02 16:15:02 -08003935
3936 if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
3937 pm_state == HIF_PM_RUNTIME_STATE_NONE) {
3938 sc->pm_stats.runtime_get++;
3939 ret = __hif_pm_runtime_get(sc->dev);
3940
3941 /* Get can return 1 if the device is already active, just return
3942 * success in that case
3943 */
3944 if (ret > 0)
3945 ret = 0;
3946
3947 if (ret)
3948 hif_pm_runtime_put(hif_ctx);
3949
3950 if (ret && ret != -EINPROGRESS) {
3951 sc->pm_stats.runtime_get_err++;
3952 HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303953 __func__, qdf_atomic_read(&sc->pm_state), ret);
Houston Hoffman9078a152015-11-02 16:15:02 -08003954 }
3955
3956 return ret;
3957 }
3958
3959 sc->pm_stats.request_resume++;
3960 sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
3961 ret = hif_pm_request_resume(sc->dev);
3962
3963 return -EAGAIN;
3964}
3965
3966/**
3967 * hif_pm_runtime_put() - do a put operation on the device
3968 *
3969 * A put operation will allow a runtime suspend after a corresponding
3970 * get was done. This API should be used once the data transfer is complete.
3971 *
3972 * This API will return a failure if runtime PM is stopped.
3973 * It will also return a failure if it would decrement the usage count below 0.
3974 *
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303975 * Return: 0 if the put is performed, otherwise a negative error code
Houston Hoffman9078a152015-11-02 16:15:02 -08003976 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303977int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
Houston Hoffman9078a152015-11-02 16:15:02 -08003978{
Komal Seelam644263d2016-02-22 20:45:49 +05303979 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05303980 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08003981 int pm_state, usage_count;
Houston Hoffman9078a152015-11-02 16:15:02 -08003982 char *error = NULL;
3983
Jeff Johnson8d639a02019-03-18 09:51:11 -07003984 if (!scn) {
Houston Hoffman9078a152015-11-02 16:15:02 -08003985 HIF_ERROR("%s: Could not do runtime put, scn is null",
3986 __func__);
3987 return -EFAULT;
3988 }
Houston Hoffman9078a152015-11-02 16:15:02 -08003989 usage_count = atomic_read(&sc->dev->power.usage_count);
3990
3991 if (usage_count == 1) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303992 pm_state = qdf_atomic_read(&sc->pm_state);
Houston Hoffman9078a152015-11-02 16:15:02 -08003993
3994 if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3995 error = "Ignoring unexpected put when runtime pm is disabled";
3996
3997 } else if (usage_count == 0) {
3998 error = "PUT Without a Get Operation";
3999 }
4000
4001 if (error) {
Houston Hoffman9078a152015-11-02 16:15:02 -08004002 hif_pci_runtime_pm_warn(sc, error);
Houston Hoffman9078a152015-11-02 16:15:02 -08004003 return -EINVAL;
4004 }
4005
4006 sc->pm_stats.runtime_put++;
4007
Yue Maac6b2752019-05-08 17:17:12 -07004008 hif_pm_runtime_mark_last_busy(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004009 hif_pm_runtime_put_auto(sc->dev);
4010
4011 return 0;
4012}
4013
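/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * a caller brackets a data send with a get/put pair.  hif_send_example_pkt()
 * is a hypothetical helper used purely for this example; on -EAGAIN the bus
 * was suspended and only a resume was requested, so the caller should retry
 * or queue the frame.
 */
#if 0
static int hif_runtime_pm_tx_example(struct hif_opaque_softc *hif_ctx,
				     void *pkt)
{
	int ret = hif_pm_runtime_get(hif_ctx);

	if (ret)	/* e.g. -EAGAIN: bus suspended, resume requested */
		return ret;

	ret = hif_send_example_pkt(hif_ctx, pkt);	/* hypothetical send */

	hif_pm_runtime_put(hif_ctx);
	return ret;
}
#endif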
4014
4015/**
Manikandan Mohanc23f28e2017-04-07 18:17:02 -07004016 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4017 * reason
Houston Hoffman9078a152015-11-02 16:15:02 -08004018 * @hif_sc: pci context
4019 * @lock: runtime_pm lock being acquired
4020 *
4021 * Return: 0 if successful.
4022 */
4023static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4024 *hif_sc, struct hif_pm_runtime_lock *lock)
4025{
4026 int ret = 0;
4027
4028 /*
4029 * We shouldn't set context->timeout to zero here when the
4030 * context is active, since the timeout API can be called back to
4031 * back for the same context,
4032 * e.g. echo "1=T:10:T:20" > /d/cnss_runtime_pm.
4033 * Instead, context->timeout is set to zero in the
4034 * hif_pm_runtime_prevent_suspend() API so that the timeout variant is no
4035 * longer active and the list entry of this context is deleted during allow suspend.
4036 */
4037 if (lock->active)
4038 return 0;
4039
4040 ret = __hif_pm_runtime_get(hif_sc->dev);
4041
4042 /*
4043 * ret can be -EINPROGRESS if the runtime status is RPM_RESUMING or
4044 * RPM_SUSPENDING. Any other negative value is an error.
4045 * We shouldn't do a runtime_put here: allow suspend is called later
4046 * with this context and the usage count is decremented there, so
4047 * suspend stays prevented in the meantime.
4048 */
4049
4050 if (ret < 0 && ret != -EINPROGRESS) {
4051 hif_sc->pm_stats.runtime_get_err++;
4052 hif_pci_runtime_pm_warn(hif_sc,
4053 "Prevent Suspend Runtime PM Error");
4054 }
4055
4056 hif_sc->prevent_suspend_cnt++;
4057
4058 lock->active = true;
4059
4060 list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4061
4062 hif_sc->pm_stats.prevent_suspend++;
4063
Sarada Prasanna Garnayakdf1c4b22016-10-06 11:03:45 +05304064 HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4065 hif_pm_runtime_state_to_string(
4066 qdf_atomic_read(&hif_sc->pm_state)),
4067 ret);
Houston Hoffman9078a152015-11-02 16:15:02 -08004068
4069 return ret;
4070}
4071
4072static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4073 struct hif_pm_runtime_lock *lock)
4074{
Yue Maac6b2752019-05-08 17:17:12 -07004075 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004076 int ret = 0;
4077 int usage_count;
4078
4079 if (hif_sc->prevent_suspend_cnt == 0)
4080 return ret;
4081
4082 if (!lock->active)
4083 return ret;
4084
4085 usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4086
4087 /*
4088 * During driver unload, the platform driver increments the usage
4089 * count to prevent any runtime suspend from being invoked.
4090 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state, the
4091 * usage_count should be one. Ideally this shouldn't happen, as
4092 * context->active must be set for allow suspend to be reached.
4093 * Handle this case here to prevent any failures.
4094 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304095 if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
Houston Hoffman9078a152015-11-02 16:15:02 -08004096 && usage_count == 1) || usage_count == 0) {
4097 hif_pci_runtime_pm_warn(hif_sc,
4098 "Allow without a prevent suspend");
4099 return -EINVAL;
4100 }
4101
4102 list_del(&lock->list);
4103
4104 hif_sc->prevent_suspend_cnt--;
4105
4106 lock->active = false;
4107 lock->timeout = 0;
4108
Yue Maac6b2752019-05-08 17:17:12 -07004109 hif_pm_runtime_mark_last_busy(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004110 ret = hif_pm_runtime_put_auto(hif_sc->dev);
4111
Sarada Prasanna Garnayakdf1c4b22016-10-06 11:03:45 +05304112 HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4113 hif_pm_runtime_state_to_string(
4114 qdf_atomic_read(&hif_sc->pm_state)),
4115 ret);
Houston Hoffman9078a152015-11-02 16:15:02 -08004116
4117 hif_sc->pm_stats.allow_suspend++;
4118 return ret;
4119}
4120
4121/**
4122 * hif_pm_runtime_lock_timeout_fn() - timer callback for runtime lock timeouts
4123 * @data: callback data, the pci context
4124 *
Jeff Johnsonab0aeed2018-05-06 16:07:20 -07004125 * If runtime locks are acquired with a timeout, this function releases
4126 * the timed-out locks once the shared timer expires.
4127 *
4128 * Return: void
4129 */
Ashish Kumar Dhanotiyaad85c382019-01-17 20:43:53 +05304130static void hif_pm_runtime_lock_timeout_fn(void *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08004131{
Ashish Kumar Dhanotiyaad85c382019-01-17 20:43:53 +05304132 struct hif_pci_softc *hif_sc = data;
Houston Hoffman9078a152015-11-02 16:15:02 -08004133 unsigned long timer_expires;
4134 struct hif_pm_runtime_lock *context, *temp;
4135
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304136 spin_lock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004137
4138 timer_expires = hif_sc->runtime_timer_expires;
4139
4140 /* Make sure we are not called too early, this should take care of
4141 * following case
4142 *
4143 * CPU0 CPU1 (timeout function)
4144 * ---- ----------------------
4145 * spin_lock_irq
4146 * timeout function called
4147 *
4148 * mod_timer()
4149 *
4150 * spin_unlock_irq
4151 * spin_lock_irq
4152 */
4153 if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4154 hif_sc->runtime_timer_expires = 0;
4155 list_for_each_entry_safe(context, temp,
4156 &hif_sc->prevent_suspend_list, list) {
4157 if (context->timeout) {
4158 __hif_pm_runtime_allow_suspend(hif_sc, context);
4159 hif_sc->pm_stats.allow_suspend_timeout++;
4160 }
4161 }
4162 }
4163
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304164 spin_unlock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004165}
4166
Komal Seelam5584a7c2016-02-24 19:22:48 +05304167int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
Houston Hoffman9078a152015-11-02 16:15:02 -08004168 struct hif_pm_runtime_lock *data)
4169{
Komal Seelam644263d2016-02-22 20:45:49 +05304170 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4171 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004172 struct hif_pm_runtime_lock *context = data;
Houston Hoffman9078a152015-11-02 16:15:02 -08004173
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004174 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08004175 return 0;
4176
4177 if (!context)
4178 return -EINVAL;
4179
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304180 if (in_irq())
4181 WARN_ON(1);
4182
4183 spin_lock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004184 context->timeout = 0;
4185 __hif_pm_runtime_prevent_suspend(hif_sc, context);
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304186 spin_unlock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004187
4188 return 0;
4189}
4190
Komal Seelam5584a7c2016-02-24 19:22:48 +05304191int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
Komal Seelam644263d2016-02-22 20:45:49 +05304192 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08004193{
Komal Seelam644263d2016-02-22 20:45:49 +05304194 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4195 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004196 struct hif_pm_runtime_lock *context = data;
4197
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004198 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08004199 return 0;
4200
4201 if (!context)
4202 return -EINVAL;
4203
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304204 if (in_irq())
4205 WARN_ON(1);
4206
4207 spin_lock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004208
4209 __hif_pm_runtime_allow_suspend(hif_sc, context);
4210
4211 /* The list can be empty as well in cases where
4212 * we have one context in the list and the allow
4213 * suspend came before the timer expires and we delete
4214 * context above from the list.
4215 * When list is empty prevent_suspend count will be zero.
4216 */
4217 if (hif_sc->prevent_suspend_cnt == 0 &&
4218 hif_sc->runtime_timer_expires > 0) {
Ashish Kumar Dhanotiyaad85c382019-01-17 20:43:53 +05304219 qdf_timer_free(&hif_sc->runtime_timer);
Houston Hoffman9078a152015-11-02 16:15:02 -08004220 hif_sc->runtime_timer_expires = 0;
4221 }
4222
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304223 spin_unlock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004224
4225 return 0;
4226}
4227
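/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * holding a runtime PM lock across an asynchronous operation so the bus
 * cannot runtime suspend until the response arrives.  The lock would have
 * been created with hif_runtime_lock_init(); start_example_async_op() is a
 * hypothetical request used only for illustration.
 */
#if 0
static void example_async_op_start(struct hif_opaque_softc *hif_ctx,
				   struct hif_pm_runtime_lock *lock)
{
	/* vote against runtime suspend until the completion path runs */
	hif_pm_runtime_prevent_suspend(hif_ctx, lock);
	start_example_async_op(hif_ctx);	/* hypothetical request */
}

static void example_async_op_done(struct hif_opaque_softc *hif_ctx,
				  struct hif_pm_runtime_lock *lock)
{
	/* response received: drop the vote so runtime suspend can resume */
	hif_pm_runtime_allow_suspend(hif_ctx, lock);
}
#endif
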
4228/**
4229 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4230 * @ol_sc: HIF context
4231 * @lock: which lock is being acquired
4232 * @delay: Timeout in milliseconds
4233 *
4234 * Prevent runtime suspend with a timeout after which runtime suspend would be
4235 * allowed. This API uses a single timer to allow the suspend and timer is
4236 * modified if the timeout is changed before timer fires.
4237 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4238 * of starting the timer.
4239 *
4240 * It is wise to try not to use this API and correct the design if possible.
4241 *
4242 * Return: 0 on success and negative error code on failure
4243 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05304244int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
Houston Hoffman9078a152015-11-02 16:15:02 -08004245 struct hif_pm_runtime_lock *lock, unsigned int delay)
4246{
Komal Seelam644263d2016-02-22 20:45:49 +05304247 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4248 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4249
Houston Hoffman9078a152015-11-02 16:15:02 -08004250 int ret = 0;
4251 unsigned long expires;
Houston Hoffman9078a152015-11-02 16:15:02 -08004252 struct hif_pm_runtime_lock *context = lock;
4253
Komal Seelambd7c51d2016-02-24 10:27:30 +05304254 if (hif_is_load_or_unload_in_progress(sc)) {
Houston Hoffman9078a152015-11-02 16:15:02 -08004255 HIF_ERROR("%s: Load/unload in progress, ignore!",
4256 __func__);
4257 return -EINVAL;
4258 }
4259
Komal Seelambd7c51d2016-02-24 10:27:30 +05304260 if (hif_is_recovery_in_progress(sc)) {
Houston Hoffman9078a152015-11-02 16:15:02 -08004261 HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4262 return -EINVAL;
4263 }
4264
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004265 if (!sc->hif_config.enable_runtime_pm)
Houston Hoffman9078a152015-11-02 16:15:02 -08004266 return 0;
4267
4268 if (!context)
4269 return -EINVAL;
4270
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304271 if (in_irq())
4272 WARN_ON(1);
4273
Houston Hoffman9078a152015-11-02 16:15:02 -08004274 /*
4275 * Don't use internal timer if the timeout is less than auto suspend
4276 * delay.
4277 */
4278 if (delay <= hif_sc->dev->power.autosuspend_delay) {
4279 hif_pm_request_resume(hif_sc->dev);
Yue Maac6b2752019-05-08 17:17:12 -07004280 hif_pm_runtime_mark_last_busy(ol_sc);
Houston Hoffman9078a152015-11-02 16:15:02 -08004281 return ret;
4282 }
4283
4284 expires = jiffies + msecs_to_jiffies(delay);
4285 expires += !expires;
4286
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304287 spin_lock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004288
4289 context->timeout = delay;
4290 ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4291 hif_sc->pm_stats.prevent_suspend_timeout++;
4292
4293 /* Modify the timer only if new timeout is after already configured
4294 * timeout
4295 */
4296 if (time_after(expires, hif_sc->runtime_timer_expires)) {
Ashish Kumar Dhanotiyaad85c382019-01-17 20:43:53 +05304297 qdf_timer_mod(&hif_sc->runtime_timer, delay);
Houston Hoffman9078a152015-11-02 16:15:02 -08004298 hif_sc->runtime_timer_expires = expires;
4299 }
4300
Sarada Prasanna Garnayaked8018d2017-02-08 21:22:36 +05304301 spin_unlock_bh(&hif_sc->runtime_lock);
Houston Hoffman9078a152015-11-02 16:15:02 -08004302
Sarada Prasanna Garnayakdf1c4b22016-10-06 11:03:45 +05304303 HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4304 hif_pm_runtime_state_to_string(
4305 qdf_atomic_read(&hif_sc->pm_state)),
4306 delay, ret);
Houston Hoffman9078a152015-11-02 16:15:02 -08004307
4308 return ret;
4309}
4310
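/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * taking a timed vote against runtime suspend, for cases where the
 * completion path cannot be relied on to call allow suspend.  The 500 ms
 * value is only an example; an explicit hif_pm_runtime_allow_suspend() may
 * still release the vote before the shared timer fires.
 */
#if 0
static void example_timed_prevent(struct hif_opaque_softc *hif_ctx,
				  struct hif_pm_runtime_lock *lock)
{
	/* keep the bus up for at most 500 ms */
	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, lock, 500);
}
#endif
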
4311/**
4312 * hif_runtime_lock_init() - API to initialize a Runtime PM context
4313 * @name: Context name
4314 *
Jeff Johnsonc9e37ee2018-05-06 17:14:43 -07004315 * This API allocates a Runtime PM context for the caller and
4316 * stores it in the given qdf_runtime_lock_t.
4317 *
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08004318 * Return: 0 on success, -ENOMEM if the context cannot be allocated
Houston Hoffman9078a152015-11-02 16:15:02 -08004319 */
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08004320int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
Houston Hoffman9078a152015-11-02 16:15:02 -08004321{
4322 struct hif_pm_runtime_lock *context;
4323
Yue Mab4e12682017-11-15 11:05:11 -08004324 HIF_INFO("Initializing Runtime PM wakelock %s", name);
4325
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304326 context = qdf_mem_malloc(sizeof(*context));
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05304327 if (!context)
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08004328 return -ENOMEM;
Houston Hoffman9078a152015-11-02 16:15:02 -08004329
4330 context->name = name ? name : "Default";
Prashanth Bhatta65b0eaa2017-01-19 15:33:43 -08004331 lock->lock = context;
4332
4333 return 0;
Houston Hoffman9078a152015-11-02 16:15:02 -08004334}
4335
4336/**
4337 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4338 * @data: Runtime PM context
4339 *
4340 * Return: void
4341 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05304342void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
Komal Seelam644263d2016-02-22 20:45:49 +05304343 struct hif_pm_runtime_lock *data)
Houston Hoffman9078a152015-11-02 16:15:02 -08004344{
Houston Hoffman9078a152015-11-02 16:15:02 -08004345 struct hif_pm_runtime_lock *context = data;
Houston Hoffmanb21a0532016-03-14 21:12:12 -07004346 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
Houston Hoffman9078a152015-11-02 16:15:02 -08004347
Yue Mab4e12682017-11-15 11:05:11 -08004348 if (!context) {
4349 HIF_ERROR("Runtime PM wakelock context is NULL");
Houston Hoffman9078a152015-11-02 16:15:02 -08004350 return;
Yue Mab4e12682017-11-15 11:05:11 -08004351 }
4352
4353 HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4354
Houston Hoffman9078a152015-11-02 16:15:02 -08004355 /*
4356 * Ensure to delete the context list entry and reduce the usage count
4357 * before freeing the context if context is active.
4358 */
Manikandan Mohanb01696b2017-05-09 18:03:19 -07004359 if (sc) {
4360 spin_lock_bh(&sc->runtime_lock);
4361 __hif_pm_runtime_allow_suspend(sc, context);
4362 spin_unlock_bh(&sc->runtime_lock);
4363 }
Yue Mab4e12682017-11-15 11:05:11 -08004364
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05304365 qdf_mem_free(context);
Houston Hoffman9078a152015-11-02 16:15:02 -08004366}
Yue Mafab77ad2019-06-25 18:19:47 -07004367
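/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * full lifecycle of a runtime PM lock.  This assumes qdf_runtime_lock_t
 * exposes the HIF lock context via its .lock member, which is where
 * hif_runtime_lock_init() above stores it.
 */
#if 0
static int example_runtime_lock_lifecycle(struct hif_opaque_softc *hif_ctx)
{
	qdf_runtime_lock_t rtpm_lock;
	int ret;

	ret = hif_runtime_lock_init(&rtpm_lock, "example_lock");
	if (ret)
		return ret;

	hif_pm_runtime_prevent_suspend(hif_ctx, rtpm_lock.lock);
	/* ... work that must not be interrupted by runtime suspend ... */
	hif_pm_runtime_allow_suspend(hif_ctx, rtpm_lock.lock);

	hif_runtime_lock_deinit(hif_ctx, rtpm_lock.lock);
	return 0;
}
#endif
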
4368/**
4369 * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
4370 * @hif_ctx: HIF context
4371 *
4372 * Return: true for runtime suspended, otherwise false
4373 */
4374bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
4375{
4376 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4377
4378 return qdf_atomic_read(&sc->pm_state) ==
4379 HIF_PM_RUNTIME_STATE_SUSPENDED;
4380}
4381
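/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * a caller that must not touch the device while it is runtime suspended can
 * check hif_pm_runtime_is_suspended() and kick off a resume with
 * hif_pm_runtime_request_resume() before retrying.
 */
#if 0
static int example_access_when_awake(struct hif_opaque_softc *hif_ctx)
{
	if (hif_pm_runtime_is_suspended(hif_ctx)) {
		/* ask runtime PM to bring the bus back up, retry later */
		hif_pm_runtime_request_resume(hif_ctx);
		return -EAGAIN;
	}

	/* safe to access device registers here */
	return 0;
}
#endif
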
4382/**
4383 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
4384 * @hif_ctx: HIF context
4385 *
4386 * monitor_wake_intr variable can be used to indicate if driver expects wake
4387 * MSI for runtime PM
4388 *
4389 * Return: monitor_wake_intr variable
4390 */
4391int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
4392{
4393 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4394
4395 return qdf_atomic_read(&sc->monitor_wake_intr);
4396}
4397
4398/**
4399 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
4400 * @hif_ctx: HIF context
4401 * @val: value to set
4402 *
4403 * monitor_wake_intr variable can be used to indicate if driver expects wake
4404 * MSI for runtime PM
4405 *
4406 * Return: void
4407 */
4408void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
4409 int val)
4410{
4411 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4412
4413 qdf_atomic_set(&sc->monitor_wake_intr, val);
4414}
Houston Hoffman9078a152015-11-02 16:15:02 -08004415#endif /* FEATURE_RUNTIME_PM */
Dustin Brown6834d322017-03-20 15:02:48 -07004416
4417int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4418{
4419 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4420
4421 /* legacy case only has one irq */
4422 return pci_scn->irq;
4423}
Surabhi Vishnoi6f752b42017-08-31 17:54:50 +05304424
4425int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4426{
4427 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
Lin Baiebf8d632017-12-12 10:55:48 +08004428 struct hif_target_info *tgt_info;
4429
4430 tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4431
4432 if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
Venkata Sharath Chandra Manchala79860aa2018-06-12 15:16:36 -07004433 tgt_info->target_type == TARGET_TYPE_QCA6390 ||
Lin Baiebf8d632017-12-12 10:55:48 +08004434 tgt_info->target_type == TARGET_TYPE_QCA8074) {
4435 /*
4436 * The offset's memory type would need to be considered for
4437 * QCA6290/QCA6390/QCA8074, and mem_len and DRAM_BASE_ADDRESS/
4438 * DRAM_SIZE would need to be properly initialized, so skip the check.
4439 */
4440 return 0;
4441 }
Surabhi Vishnoi6f752b42017-08-31 17:54:50 +05304442
4443 if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4444 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4445 return 0;
4446 }
4447
jiad43202192018-11-02 15:34:26 +08004448 HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
4449 offset, (uint32_t)(offset + sizeof(unsigned int)),
4450 sc->mem_len);
Surabhi Vishnoi6f752b42017-08-31 17:54:50 +05304451
4452 return -EINVAL;
4453}
Nirav Shahd9dce6e2018-02-26 14:50:25 +05304454
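/*
 * Illustrative sketch only (guarded by #if 0, not part of the driver build):
 * validating an offset with hif_pci_addr_in_boundary() before a diagnostic
 * register read.  hif_diag_read_example() is a hypothetical reader used only
 * for this example.
 */
#if 0
static int example_checked_read(struct hif_softc *scn, uint32_t offset,
				uint32_t *value)
{
	if (hif_pci_addr_in_boundary(scn, offset))
		return -EINVAL;	/* offset outside the BAR/DRAM window */

	*value = hif_diag_read_example(scn, offset);	/* hypothetical */
	return 0;
}
#endif
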
4455/**
4456 * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4457 * @scn: hif context
4458 *
4459 * Return: true if soc needs driver bmi otherwise false
4460 */
4461bool hif_pci_needs_bmi(struct hif_softc *scn)
4462{
4463 return !ce_srng_based(scn);
4464}