/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <osdep.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#endif
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "hif_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "bmi_msg.h"            /* TARGET_TYPE_ */
#include "regtable.h"
#include "ol_fw.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <osapi_linux.h>
#include "cds_api.h"
#include "cdf_status.h"
#include "cds_sched.h"
#include "wma_api.h"
#include "cdf_atomic.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_main.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#else
#include "cnss_stub.h"
#endif
#include "epping_main.h"
#include "mp_dev.h"
#include "hif_debug.h"

#ifndef REMOVE_PKT_LOG
#include "ol_txrx_types.h"
#include "pktlog_ac_api.h"
#include "pktlog_ac.h"
#endif
#include "if_pci_internal.h"
#include "icnss_stub.h"
#include "ce_tasklet.h"
#include "cds_concurrency.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

unsigned int msienable = 0;
module_param(msienable, int, 0644);

int hif_pci_war1 = 0;
static DEFINE_SPINLOCK(pciwar_lock);

#ifndef REMOVE_PKT_LOG
struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs = NULL;
#endif

/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
 * PCIe data bus errors. As a workaround, the unload reset sequence
 * uses a Target CPU warm reset instead of SOC_GLOBAL_RESET.
 */
#define CPU_WARM_RESET_WAR
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
        uint32_t irq_enable;
        uint32_t irq_status;
};

#if !defined(QCA_WIFI_3_0_ADRASTEA)
static inline void cnss_intr_notify_q6(void)
{
}
#endif

#if !defined(QCA_WIFI_3_0_ADRASTEA)
static inline void *cnss_get_target_smem(void)
{
        return NULL;
}
#endif

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
        return;
}
#else
void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
        struct ol_softc *scn = sc->ol_sc;
        unsigned int target_enable0, target_enable1;
        unsigned int target_cause0, target_cause1;

        target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
        target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
        target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
        target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);

        if ((target_enable0 & target_cause0) ||
            (target_enable1 & target_cause1)) {
                hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
                hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);

                if (scn->notice_send)
                        cnss_intr_notify_q6();
        }
}
#endif

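/**
 * hif_pci_interrupt_handler() - top-level handler for legacy PCI interrupts
 * @irq: irq number
 * @arg: hif_pci_softc context registered with request_irq()
 *
 * Acknowledges and clears the legacy (line) interrupt, then either
 * schedules the SSR tasklet when a firmware event is pending or hands
 * the interrupt to the copy engine dispatcher.
 *
 * Return: IRQ_HANDLED
 */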
static irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
{
        struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
        struct ol_softc *scn = sc->ol_sc;
        struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
        volatile int tmp;
        uint16_t val;
        uint32_t bar0;
        uint32_t fw_indicator_address, fw_indicator;
        bool ssr_irq = false;
        unsigned int host_cause, host_enable;

        if (LEGACY_INTERRUPTS(sc)) {
                if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                        return IRQ_HANDLED;

                if (ADRASTEA_BU) {
                        host_enable = hif_read32_mb(sc->mem +
                                                    PCIE_INTR_ENABLE_ADDRESS);
                        host_cause = hif_read32_mb(sc->mem +
                                                   PCIE_INTR_CAUSE_ADDRESS);
                        if (!(host_enable & host_cause)) {
                                hif_pci_route_adrastea_interrupt(sc);
                                return IRQ_HANDLED;
                        }
                }

                /* Clear Legacy PCI line interrupts
                 * IMPORTANT: the INTR_CLR register has to be set
                 * after INTR_ENABLE is set to 0,
                 * otherwise the interrupt cannot really be cleared */
                hif_write32_mb(sc->mem +
                               (SOC_CORE_BASE_ADDRESS |
                                PCIE_INTR_ENABLE_ADDRESS), 0);

                hif_write32_mb(sc->mem +
                               (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
                               ADRASTEA_BU ?
                               (host_enable & host_cause) :
                               HOST_GROUP0_MASK);

                if (ADRASTEA_BU)
                        hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));

                /* IMPORTANT: this extra read transaction is required to
                 * flush the posted write buffer */
                if (!ADRASTEA_BU) {
                        tmp = hif_read32_mb(sc->mem +
                                            (SOC_CORE_BASE_ADDRESS |
                                             PCIE_INTR_ENABLE_ADDRESS));

                        if (tmp == 0xdeadbeef) {
                                HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
                                          __func__);

                                pci_read_config_word(sc->pdev, PCI_VENDOR_ID,
                                                     &val);
                                HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
                                          __func__, val);

                                pci_read_config_word(sc->pdev, PCI_DEVICE_ID,
                                                     &val);
                                HIF_ERROR("%s: PCI Device ID = 0x%04x",
                                          __func__, val);

                                pci_read_config_word(sc->pdev, PCI_COMMAND,
                                                     &val);
                                HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
                                          val);

                                pci_read_config_word(sc->pdev, PCI_STATUS,
                                                     &val);
                                HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
                                          val);

                                pci_read_config_dword(sc->pdev,
                                                      PCI_BASE_ADDRESS_0,
                                                      &bar0);
                                HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
                                          bar0);

                                HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
                                          __func__,
                                          hif_read32_mb(sc->mem +
                                                        PCIE_LOCAL_BASE_ADDRESS
                                                        + RTC_STATE_ADDRESS));
                                HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
                                          __func__,
                                          hif_read32_mb(sc->mem +
                                                        PCIE_LOCAL_BASE_ADDRESS
                                                        + PCIE_SOC_WAKE_ADDRESS));
                                HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
                                          __func__,
                                          hif_read32_mb(sc->mem + 0x80008),
                                          hif_read32_mb(sc->mem + 0x8000c));
                                HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
                                          __func__,
                                          hif_read32_mb(sc->mem + 0x80010),
                                          hif_read32_mb(sc->mem + 0x80014));
                                HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
                                          __func__,
                                          hif_read32_mb(sc->mem + 0x80018),
                                          hif_read32_mb(sc->mem + 0x8001c));
                                CDF_BUG(0);
                        }

                        PCI_CLR_CAUSE0_REGISTER(sc);
                }

                if (HAS_FW_INDICATOR) {
                        fw_indicator_address = hif_state->fw_indicator_address;
                        fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
                        if ((fw_indicator != ~0) &&
                            (fw_indicator & FW_IND_EVENT_PENDING))
                                ssr_irq = true;
                }

                if (Q_TARGET_ACCESS_END(scn) < 0)
                        return IRQ_HANDLED;
        }
        /* TBDXXX: Add support for WMAC */

        if (ssr_irq) {
                sc->irq_event = irq;
                cdf_atomic_set(&scn->tasklet_from_intr, 1);

                cdf_atomic_inc(&scn->active_tasklet_cnt);
                tasklet_schedule(&sc->intr_tq);
        } else {
                icnss_dispatch_ce_irq(scn);
        }

        return IRQ_HANDLED;
}

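/**
 * hif_pci_msi_fw_handler() - handler for the dedicated firmware MSI
 * @irq: irq number
 * @arg: hif_pci_softc context registered with request_irq()
 *
 * Return: IRQ_HANDLED
 */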
static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
{
        struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;

        (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, sc->ol_sc);

        return IRQ_HANDLED;
}

bool hif_targ_is_awake(struct ol_softc *scn, void *__iomem *mem)
{
        HIF_PCI_TARG_IS_AWAKE(scn, mem);
}

bool hif_pci_targ_is_present(struct ol_softc *scn, void *__iomem *mem)
{
        return 1;       /* FIX THIS */
}

/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: ol_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct ol_softc *scn)
{
        struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
        A_target_id_t pci_addr = scn->mem;

        cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
        /*
         * If the deferred sleep timer is running, cancel it
         * and put the SoC into sleep.
         */
        if (hif_state->fake_sleep == true) {
                cdf_softirq_timer_cancel(&hif_state->sleep_timer);
                if (hif_state->verified_awake == false) {
                        hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                                       PCIE_SOC_WAKE_ADDRESS,
                                       PCIE_SOC_WAKE_RESET);
                }
                hif_state->fake_sleep = false;
        }
        cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct ol_softc *scn)
{
        return;
}
#endif

#define A_PCIE_LOCAL_REG_READ(mem, addr) \
        hif_read32_mb((char *)(mem) + \
                      PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
        hif_write32_mb(((char *)(mem) + \
                        PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))

#define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
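/**
 * hif_pci_device_reset() - cold-reset the target SoC over PCIe
 * @sc: hif pci context
 *
 * Wakes the target, asserts SOC_GLOBAL_RESET, waits for the RTC state
 * machine to report cold reset, then pulls the target back out of reset.
 */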
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
        void __iomem *mem = sc->mem;
        int i;
        uint32_t val;
        struct ol_softc *scn = sc->ol_sc;

        if (!scn->hostdef)
                return;

        /* NB: Don't check resetok here. This form of reset
         * is integral to correct operation. */

        if (!SOC_GLOBAL_RESET_ADDRESS) {
                return;
        }

        if (!mem) {
                return;
        }

        HIF_ERROR("%s: Reset Device", __func__);

        /*
         * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
         * writing WAKE_V, the Target may scribble over Host memory!
         */
        A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (hif_targ_is_awake(scn, mem))
                        break;

                cdf_mdelay(1);
        }

        /* Put Target, including PCIe, into RESET. */
        val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
        val |= 1;
        A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
                    RTC_STATE_COLD_RESET_MASK)
                        break;

                cdf_mdelay(1);
        }

        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
        A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (!(A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
                      RTC_STATE_COLD_RESET_MASK))
                        break;

                cdf_mdelay(1);
        }

        A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
        void __iomem *mem = sc->mem;
        int i;
        uint32_t val;
        uint32_t fw_indicator;
        struct ol_softc *scn = sc->ol_sc;

        /* NB: Don't check resetok here. This form of reset is
         * integral to correct operation. */

        if (!mem) {
                return;
        }

        HIF_INFO_MED("%s: Target Warm Reset", __func__);

        /*
         * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
         * writing WAKE_V, the Target may scribble over Host memory!
         */
        A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (hif_targ_is_awake(scn, mem))
                        break;
                cdf_mdelay(1);
        }

        /*
         * Disable Pending interrupts
         */
        val = hif_read32_mb(mem +
                            (SOC_CORE_BASE_ADDRESS |
                             PCIE_INTR_CAUSE_ADDRESS));
        HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
                     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
        /* Target CPU Intr Cause */
        val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
        HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

        val = hif_read32_mb(mem +
                            (SOC_CORE_BASE_ADDRESS |
                             PCIE_INTR_ENABLE_ADDRESS));
        hif_write32_mb((mem +
                        (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
        hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
                       HOST_GROUP0_MASK);

        cdf_mdelay(100);

        /* Clear FW_INDICATOR_ADDRESS */
        if (HAS_FW_INDICATOR) {
                fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
                hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
        }

        /* Clear Target LF Timer interrupts */
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS +
                             SOC_LF_TIMER_CONTROL0_ADDRESS));
        HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
                     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
        val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
        hif_write32_mb(mem +
                       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
                       val);

        /* Reset CE */
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS |
                             SOC_RESET_CONTROL_ADDRESS));
        val |= SOC_RESET_CONTROL_CE_RST_MASK;
        hif_write32_mb((mem +
                        (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
                       val);
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS |
                             SOC_RESET_CONTROL_ADDRESS));
        cdf_mdelay(10);

        /* CE unreset */
        val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
        hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
                       val);
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS |
                             SOC_RESET_CONTROL_ADDRESS));
        cdf_mdelay(10);

        /* Read Target CPU Intr Cause */
        val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
        HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
                     __func__, val);

        /* CPU warm RESET */
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS |
                             SOC_RESET_CONTROL_ADDRESS));
        val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
        hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
                       val);
        val = hif_read32_mb(mem +
                            (RTC_SOC_BASE_ADDRESS |
                             SOC_RESET_CONTROL_ADDRESS));
        HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
                     __func__, val);

        cdf_mdelay(100);
        HIF_INFO_MED("%s: Target Warm reset complete", __func__);
}

#ifndef QCA_WIFI_3_0
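/**
 * hif_check_fw_reg() - check the firmware indicator for helper support
 * @scn: ol_softc
 *
 * Return: 0 if the FW_IND_HELPER bit is set, 1 otherwise
 */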
int hif_check_fw_reg(struct ol_softc *scn)
{
        struct hif_pci_softc *sc = scn->hif_sc;
        void __iomem *mem = sc->mem;
        uint32_t val;

        A_TARGET_ACCESS_BEGIN_RET(scn);
        val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
        A_TARGET_ACCESS_END_RET(scn);

        HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

        if (val & FW_IND_HELPER)
                return 0;

        return 1;
}
#endif

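/**
 * hif_check_soc_status() - sanity-check PCIe link and SoC state
 * @scn: ol_softc
 *
 * Verifies the device ID from config space, tries to wake the target,
 * and reads back a few SoC registers to confirm that bus access works.
 *
 * Return: 0 on success, -EACCES if the link or target is unusable
 */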
int hif_check_soc_status(struct ol_softc *scn)
{
        uint16_t device_id;
        uint32_t val;
        uint16_t timeout_count = 0;
        struct hif_pci_softc *sc = scn->hif_sc;

        /* Check device ID from PCIe configuration space for link status */
        pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
        if (device_id != sc->devid) {
                HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
                          __func__, device_id, sc->devid);
                return -EACCES;
        }

        /* Check PCIe local register for bar/memory access */
        val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                            RTC_STATE_ADDRESS);
        HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);

        /* Try to wake up the target if it sleeps */
        hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
        HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
                     hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                                   PCIE_SOC_WAKE_ADDRESS));

        /* Check if the target can be woken up */
        while (!hif_targ_is_awake(scn, sc->mem)) {
                if (timeout_count >= PCIE_WAKE_TIMEOUT) {
                        HIF_ERROR("%s: wake up timeout, %08x, %08x",
                                  __func__,
                                  hif_read32_mb(sc->mem +
                                                PCIE_LOCAL_BASE_ADDRESS +
                                                RTC_STATE_ADDRESS),
                                  hif_read32_mb(sc->mem +
                                                PCIE_LOCAL_BASE_ADDRESS +
                                                PCIE_SOC_WAKE_ADDRESS));
                        return -EACCES;
                }

                hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                               PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

                cdf_mdelay(100);
                timeout_count += 100;
        }

        /* Check Power register for SoC internal bus issues */
        val = hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
                            SOC_POWER_REG_OFFSET);
        HIF_INFO_MED("%s: Power register is %08x", __func__, val);

        return 0;
}

/**
 * hif_dump_pci_registers(): dump PCI debug registers
 * @scn: struct ol_softc
 *
 * This function dumps pci debug registers
 *
 * Return: void
 */
static void hif_dump_pci_registers(struct ol_softc *scn)
{
        struct hif_pci_softc *sc = scn->hif_sc;
        void __iomem *mem = sc->mem;
        uint32_t val, i, j;
        uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        uint32_t ce_base;

        A_TARGET_ACCESS_BEGIN(scn);

        /* DEBUG_INPUT_SEL_SRC = 0x6 */
        val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                            WLAN_DEBUG_INPUT_SEL_OFFSET);
        val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
        val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
        hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
                       val);

        /* DEBUG_CONTROL_ENABLE = 0x1 */
        val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                            WLAN_DEBUG_CONTROL_OFFSET);
        val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
        val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
        hif_write32_mb(mem + GPIO_BASE_ADDRESS +
                       WLAN_DEBUG_CONTROL_OFFSET, val);

        HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
                     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                   WLAN_DEBUG_INPUT_SEL_OFFSET),
                     hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                   WLAN_DEBUG_CONTROL_OFFSET));

        HIF_INFO_MED("%s: Debug CE", __func__);
        /* Loop CE debug output */
        /* AMBA_DEBUG_BUS_SEL = 0xc */
        val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
        val &= ~AMBA_DEBUG_BUS_SEL_MASK;
        val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
        hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

        for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
                /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
                val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
                                    CE_WRAPPER_DEBUG_OFFSET);
                val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
                val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
                hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
                               CE_WRAPPER_DEBUG_OFFSET, val);

                HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
                             __func__, wrapper_idx[i],
                             hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                           AMBA_DEBUG_BUS_OFFSET),
                             hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
                                           CE_WRAPPER_DEBUG_OFFSET));

                if (wrapper_idx[i] <= 7) {
                        for (j = 0; j <= 5; j++) {
                                ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
                                /* For (j=0~5) write CE_DEBUG_SEL = j */
                                val = hif_read32_mb(mem + ce_base +
                                                    CE_DEBUG_OFFSET);
                                val &= ~CE_DEBUG_SEL_MASK;
                                val |= CE_DEBUG_SEL_SET(j);
                                hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
                                               val);

                                /* read (@gpio_athr_wlan_reg)
                                 * WLAN_DEBUG_OUT_DATA */
                                val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                                    WLAN_DEBUG_OUT_OFFSET);
                                val = WLAN_DEBUG_OUT_DATA_GET(val);

                                HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
                                             __func__, j,
                                             hif_read32_mb(mem + ce_base +
                                                           CE_DEBUG_OFFSET), val);
                        }
                } else {
                        /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
                        val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                            WLAN_DEBUG_OUT_OFFSET);
                        val = WLAN_DEBUG_OUT_DATA_GET(val);

                        HIF_INFO_MED("%s: out: %x", __func__, val);
                }
        }

        HIF_INFO_MED("%s: Debug PCIe:", __func__);
        /* Loop PCIe debug output */
        /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
        val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
        val &= ~AMBA_DEBUG_BUS_SEL_MASK;
        val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
        hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);

        for (i = 0; i <= 8; i++) {
                /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
                val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                    AMBA_DEBUG_BUS_OFFSET);
                val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
                val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
                hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
                               val);

                /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
                val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                    WLAN_DEBUG_OUT_OFFSET);
                val = WLAN_DEBUG_OUT_DATA_GET(val);

                HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
                             hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                           WLAN_DEBUG_OUT_OFFSET), val,
                             hif_read32_mb(mem + GPIO_BASE_ADDRESS +
                                           WLAN_DEBUG_OUT_OFFSET));
        }

        A_TARGET_ACCESS_END(scn);
}

/**
 * hif_dump_registers(): dump bus debug registers
 * @scn: struct ol_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_registers(struct ol_softc *scn)
{
        int status;

        status = hif_dump_ce_registers(scn);

        if (status)
                HIF_ERROR("%s: Dump CE Registers Failed", __func__);

        hif_dump_pci_registers(scn);

        return 0;
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private
 * MSI interrupt.
 */
static irqreturn_t ce_per_engine_handler(int irq, void *arg)
{
        struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
        int CE_id = irq - MSI_ASSIGN_CE_INITIAL;

        /*
         * NOTE: We are able to derive CE_id from irq because we
         * use a one-to-one mapping for CEs 0..5.
         * CEs 6 & 7 do not use interrupts at all.
         *
         * This mapping must be kept in sync with the mapping
         * used by firmware.
         */

        ce_per_engine_service(sc->ol_sc, CE_id);

        return IRQ_HANDLED;
}

#ifdef CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
        struct hif_pci_softc *sc = arg;
        struct ol_softc *scn = sc->ol_sc;

        if (!scn) {
                HIF_ERROR("%s: ol_softc is NULL", __func__);
                return;
        }

        if (scn->hif_init_done == false) {
                HIF_ERROR("%s: wlan driver is unloaded", __func__);
                return;
        }

        tasklet_schedule(&sc->intr_tq);
        return;
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
        cdf_create_work(&sc->reschedule_tasklet_work,
                        reschedule_tasklet_work_handler, sc);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* CONFIG_SLUB_DEBUG_ON */

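/**
 * wlan_tasklet() - bottom half for legacy/single-MSI interrupts
 * @data: hif_pci_softc context
 *
 * Runs the firmware interrupt handler on non-Adrastea targets, then
 * clears the from-interrupt flag and drops the active tasklet count.
 */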
static void wlan_tasklet(unsigned long data)
{
        struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
        struct ol_softc *scn = sc->ol_sc;

        if (scn->hif_init_done == false)
                goto end;

        if (cdf_atomic_read(&scn->link_suspended))
                goto end;

        if (!ADRASTEA_BU) {
                (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
                if (sc->ol_sc->target_status == OL_TRGET_STATUS_RESET)
                        goto end;
        }

end:
        cdf_atomic_set(&scn->tasklet_from_intr, 0);
        cdf_atomic_dec(&scn->active_tasklet_cnt);
}

#ifdef FEATURE_RUNTIME_PM
#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
        seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)

/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
        struct hif_pm_runtime_lock *ctx;

        HIF_ERROR("%s: usage_count: %d, pm_state: %d, prevent_suspend_cnt: %d",
                  msg, atomic_read(&sc->dev->power.usage_count),
                  atomic_read(&sc->pm_state),
                  sc->prevent_suspend_cnt);

        HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
                  sc->dev->power.runtime_status,
                  sc->dev->power.runtime_error,
                  sc->dev->power.disable_depth,
                  sc->dev->power.autosuspend_delay);

        HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
                  sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
                  sc->pm_stats.request_resume);

        HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
                  sc->pm_stats.allow_suspend,
                  sc->pm_stats.prevent_suspend);

        HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
                  sc->pm_stats.prevent_suspend_timeout,
                  sc->pm_stats.allow_suspend_timeout);

        HIF_ERROR("Suspended: %u, resumed: %u count",
                  sc->pm_stats.suspended,
                  sc->pm_stats.resumed);

        HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
                  sc->pm_stats.suspend_err,
                  sc->pm_stats.runtime_get_err);

        HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

        list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
                HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
        }

        WARN_ON(1);
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
        struct hif_pci_softc *sc = s->private;
        static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
                                                    "SUSPENDED"};
        unsigned int msecs_age;
        int pm_state = atomic_read(&sc->pm_state);
        unsigned long timer_expires, flags;
        struct hif_pm_runtime_lock *ctx;

        seq_printf(s, "%30s: %s\n", "Runtime PM state",
                   autopm_state[pm_state]);
        seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
                   sc->pm_stats.last_resume_caller);

        if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
                msecs_age = jiffies_to_msecs(
                        jiffies - sc->pm_stats.suspend_jiffies);
                seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
                           msecs_age / 1000, msecs_age % 1000);
        }

        seq_printf(s, "%30s: %d\n", "PM Usage count",
                   atomic_read(&sc->dev->power.usage_count));

        seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
                   sc->prevent_suspend_cnt);

        HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
        HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

        timer_expires = sc->runtime_timer_expires;
        if (timer_expires > 0) {
                msecs_age = jiffies_to_msecs(timer_expires - jiffies);
                seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
                           msecs_age / 1000, msecs_age % 1000);
        }

        spin_lock_irqsave(&sc->runtime_lock, flags);
        if (list_empty(&sc->prevent_suspend_list)) {
                spin_unlock_irqrestore(&sc->runtime_lock, flags);
                return 0;
        }

        seq_printf(s, "%30s: ", "Active Wakeup_Sources");
        list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
                seq_printf(s, "%s", ctx->name);
                if (ctx->timeout)
                        seq_printf(s, "(%d ms)", ctx->timeout);
                seq_puts(s, " ");
        }
        seq_puts(s, "\n");
        spin_unlock_irqrestore(&sc->runtime_lock, flags);

        return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
 * @inode: inode of the debugfs file
 * @file: file handle for the debugfs file
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
        return single_open(file, hif_pci_pm_runtime_debugfs_show,
                           inode->i_private);
}

#ifdef WLAN_OPEN_SOURCE
static const struct file_operations hif_pci_runtime_pm_fops = {
        .owner = THIS_MODULE,
        .open = hif_pci_runtime_pm_open,
        .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
        sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
                                            S_IRUSR, NULL, sc,
                                            &hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
        debugfs_remove(sc->pm_dentry);
}
#else
static inline void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
}
static inline void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
}
#endif

static void hif_pm_runtime_lock_timeout_fn(unsigned long data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
        struct ol_softc *ol_sc;

        ol_sc = sc->ol_sc;

        if (!ol_sc->enable_runtime_pm) {
                HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
                return;
        }

        if (cds_get_conparam() == CDF_FTM_MODE ||
            WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
                HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
                         __func__);
                return;
        }

        setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
                    (unsigned long)sc);

        HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
                 ol_sc->runtime_pm_delay);

        cnss_runtime_init(sc->dev, ol_sc->runtime_pm_delay);
        cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
        hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
        struct ol_softc *ol_sc = sc->ol_sc;

        if (!ol_sc->enable_runtime_pm)
                return;

        if (cds_get_conparam() == CDF_FTM_MODE ||
            WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
                return;

        cnss_runtime_exit(sc->dev);
        cnss_pm_runtime_request(sc->dev, CNSS_PM_RUNTIME_RESUME);

        cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

        hif_runtime_pm_debugfs_remove(sc);
        del_timer_sync(&sc->runtime_timer);
        /* doesn't wait for pending traffic, unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
        spin_lock_init(&sc->runtime_lock);

        cdf_atomic_init(&sc->pm_state);
        sc->prevent_linkdown_lock =
                hif_runtime_lock_init("linkdown suspend disabled");
        cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
        INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
        if (cdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
                return;
        else
                hif_pm_runtime_stop(sc);
}

#else

static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif

/**
 * hif_enable_power_management(): enable power management
 * @hif_ctx: hif context
 *
 * Currently only does runtime pm. Eventually this function could
 * consolidate other power state features such as only letting
 * the soc sleep after the driver finishes loading and re-enabling
 * aspm (hif_enable_power_gating).
 */
void hif_enable_power_management(void *hif_ctx)
{
        struct hif_pci_softc *pci_ctx;

        if (hif_ctx == NULL) {
                HIF_ERROR("%s, hif_ctx null", __func__);
                return;
        }

        pci_ctx = ((struct ol_softc *)hif_ctx)->hif_sc;

        hif_pm_runtime_start(pci_ctx);
}

/**
 * hif_disable_power_management(): disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * gracefully if runtime pm is not started. Should be updated to take
 * care of aspm and soc sleep for driver load.
 */
void hif_disable_power_management(void *hif_ctx)
{
        struct hif_pci_softc *pci_ctx;

        if (hif_ctx == NULL) {
                HIF_ERROR("%s, hif_ctx null", __func__);
                return;
        }

        pci_ctx = ((struct ol_softc *)hif_ctx)->hif_sc;

        hif_pm_runtime_stop(pci_ctx);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_bus_open(): hif_bus_open
 * @ol_sc: ol_softc
 * @bus_type: bus type
 *
 * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_NOMEM on allocation failure
 */
CDF_STATUS hif_bus_open(struct ol_softc *ol_sc, enum ath_hal_bus_type bus_type)
{
        struct hif_pci_softc *sc;

        sc = cdf_mem_malloc(sizeof(*sc));
        if (!sc) {
                HIF_ERROR("%s: no mem", __func__);
                return CDF_STATUS_E_NOMEM;
        }
        ol_sc->hif_sc = (void *)sc;
        sc->ol_sc = ol_sc;
        ol_sc->bus_type = bus_type;
        hif_pm_runtime_open(sc);

        cdf_spinlock_init(&sc->irq_lock);

        return CDF_STATUS_SUCCESS;
}

/**
 * hif_bus_close(): hif_bus_close
 * @ol_sc: ol_softc
 *
 * Return: n/a
 */
void hif_bus_close(struct ol_softc *ol_sc)
{
        struct hif_pci_softc *sc;

        if (ol_sc == NULL) {
                HIF_ERROR("%s: ol_softc is NULL", __func__);
                return;
        }
        sc = ol_sc->hif_sc;
        if (sc == NULL)
                return;

        hif_pm_runtime_close(sc);
        cdf_mem_free(sc);
        ol_sc->hif_sc = NULL;
}

#define BAR_NUM 0

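/**
 * hif_enable_pci() - enable the PCI device and map its registers
 * @sc: hif pci context
 * @pdev: pci device handle
 * @id: matched pci device id
 *
 * Enables the device, reserves BAR0, configures the DMA mask and
 * maps the target register space into sc->mem.
 *
 * Return: 0 on success, negative errno on failure
 */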
int hif_enable_pci(struct hif_pci_softc *sc,
                   struct pci_dev *pdev,
                   const struct pci_device_id *id)
{
        void __iomem *mem;
        int ret = 0;
        uint16_t device_id;
        struct ol_softc *ol_sc = sc->ol_sc;

        pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
        if (device_id != id->device) {
                HIF_ERROR(
                        "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
                        __func__, device_id, id->device);
                /* pci link is down, so returning with error code */
                return -EIO;
        }

        /* FIXME: temp. commenting out assign_resource
         * call for dev_attach to work on 2.6.38 kernel
         */
#if (!defined(__LINUX_ARM_ARCH__))
        if (pci_assign_resource(pdev, BAR_NUM)) {
                HIF_ERROR("%s: pci_assign_resource error", __func__);
                return -EIO;
        }
#endif
        if (pci_enable_device(pdev)) {
                HIF_ERROR("%s: pci_enable_device error",
                          __func__);
                return -EIO;
        }

        /* Request MMIO resources */
        ret = pci_request_region(pdev, BAR_NUM, "ath");
        if (ret) {
                HIF_ERROR("%s: PCI MMIO reservation error", __func__);
                ret = -EIO;
                goto err_region;
        }
#ifdef CONFIG_ARM_LPAE
        /* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit mask
         * for 32-bit devices as well. */
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
                goto err_dma;
        }
        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
                goto err_dma;
        }
#else
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
                goto err_dma;
        }
        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
                          __func__);
                goto err_dma;
        }
#endif

        PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);

        /* Set bus master bit in PCI_COMMAND to enable DMA */
        pci_set_master(pdev);

        /* Arrange for access to Target SoC registers. */
        mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!mem) {
                HIF_ERROR("%s: PCI iomap error", __func__);
                ret = -EIO;
                goto err_iomap;
        }
        sc->mem = mem;
        sc->pdev = pdev;
        sc->dev = &pdev->dev;
        ol_sc->aps_osdev.bdev = pdev;
        ol_sc->aps_osdev.device = &pdev->dev;
        ol_sc->aps_osdev.bc.bc_handle = (void *)mem;
        ol_sc->aps_osdev.bc.bc_bustype = HAL_BUS_TYPE_PCI;
        sc->devid = id->device;
        sc->cacheline_sz = dma_get_cache_alignment();
        ol_sc->mem = mem;
        sc->pci_enabled = true;
        return ret;

err_iomap:
        pci_clear_master(pdev);
err_dma:
        pci_release_region(pdev, BAR_NUM);
err_region:
        pci_disable_device(pdev);
        return ret;
}

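/**
 * hif_disable_pci() - undo hif_enable_pci()
 * @sc: hif pci context
 *
 * Resets the target, unmaps BAR0 and disables the PCI device.
 */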
void hif_disable_pci(struct hif_pci_softc *sc)
{
        struct ol_softc *ol_sc;

        if (!sc)
                return;

        ol_sc = sc->ol_sc;
        if (ol_sc == NULL) {
                HIF_ERROR("%s: ol_sc = NULL", __func__);
                return;
        }
        pci_set_drvdata(sc->pdev, NULL);
        hif_pci_device_reset(sc);
        pci_iounmap(sc->pdev, sc->mem);
        sc->mem = NULL;
        ol_sc->mem = NULL;
        pci_clear_master(sc->pdev);
        pci_release_region(sc->pdev, BAR_NUM);
        pci_disable_device(sc->pdev);
}

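/**
 * hif_pci_probe_tgt_wakeup() - make sure the target starts cleanly at probe
 * @sc: hif pci context
 *
 * Return: 0 on success, -EAGAIN if the target should be reset and re-probed
 */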
int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
{
        int ret = 0;
        int targ_awake_limit = 500;
#ifndef QCA_WIFI_3_0
        uint32_t fw_indicator;
#endif
        struct ol_softc *scn = sc->ol_sc;

        /*
         * Verify that the Target was started cleanly.
         * The case where this is most likely is with an AUX-powered
         * Target and a Host in WoW mode. If the Host crashes,
         * loses power, or is restarted (without unloading the driver)
         * then the Target is left (aux) powered and running. On a
         * subsequent driver load, the Target is in an unexpected state.
         * We try to catch that here in order to reset the Target and
         * retry the probe.
         */
        hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
        while (!hif_targ_is_awake(scn, sc->mem)) {
                if (0 == targ_awake_limit) {
                        HIF_ERROR("%s: target awake timeout", __func__);
                        ret = -EAGAIN;
                        goto end;
                }
                cdf_mdelay(1);
                targ_awake_limit--;
        }

#if PCIE_BAR0_READY_CHECKING
        {
                int wait_limit = 200;
                /* Synchronization point: wait until BAR0 is configured */
                while (wait_limit-- &&
                       !(hif_read32_mb(sc->mem +
                                       PCIE_LOCAL_BASE_ADDRESS +
                                       PCIE_SOC_RDY_STATUS_ADDRESS) &
                         PCIE_SOC_RDY_STATUS_BAR_MASK)) {
                        cdf_mdelay(10);
                }
                if (wait_limit < 0) {
                        /* AR6320v1 doesn't support checking the BAR0
                         * configuration; it takes about one second to
                         * wait for BAR0 to become ready */
                        HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
                                     __func__);
                }
        }
#endif

#ifndef QCA_WIFI_3_0
        fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
        hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

        if (fw_indicator & FW_IND_INITIALIZED) {
                HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
                          __func__);
                ret = -EAGAIN;
                goto end;
        }
#endif

end:
        return ret;
}

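/**
 * wlan_tasklet_msi() - per-MSI bottom half
 * @data: hif_tasklet_entry for the MSI that fired
 *
 * The last tasklet entry services the firmware interrupt; the others
 * service their corresponding copy engine.
 */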
static void wlan_tasklet_msi(unsigned long data)
{
        struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
        struct hif_pci_softc *sc = (struct hif_pci_softc *)entry->hif_handler;
        struct ol_softc *scn = sc->ol_sc;

        if (sc->ol_sc->hif_init_done == false)
                goto irq_handled;

        if (cdf_atomic_read(&sc->ol_sc->link_suspended))
                goto irq_handled;

        cdf_atomic_inc(&scn->active_tasklet_cnt);

        if (entry->id == HIF_MAX_TASKLET_NUM) {
                /* the last tasklet is for fw IRQ */
                (irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, sc->ol_sc);
                if (sc->ol_sc->target_status == OL_TRGET_STATUS_RESET)
                        goto irq_handled;
        } else if (entry->id < sc->ol_sc->ce_count) {
                ce_per_engine_service(sc->ol_sc, entry->id);
        } else {
                HIF_ERROR("%s: ERROR - invalid CE_id = %d",
                          __func__, entry->id);
        }
        return;

irq_handled:
        cdf_atomic_dec(&scn->active_tasklet_cnt);
}

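/**
 * hif_configure_msi() - try to allocate and wire up MSI interrupts
 * @sc: hif pci context
 *
 * Attempts a block of MSIs first, then a single MSI; the caller falls
 * back to legacy line interrupts when this fails.
 *
 * Return: 0 on success, negative errno on failure
 */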
int hif_configure_msi(struct hif_pci_softc *sc)
{
        int ret = 0;
        int num_msi_desired;
        int rv = -1;
        struct ol_softc *scn = sc->ol_sc;

        HIF_TRACE("%s: E", __func__);

        num_msi_desired = MSI_NUM_REQUEST;      /* Multiple MSI */
        if (num_msi_desired < 1) {
                HIF_ERROR("%s: MSI is not configured", __func__);
                return -EINVAL;
        }

        if (num_msi_desired > 1) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
                rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
                                          num_msi_desired);
#else
                rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
#endif
        }
        HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
                  __func__, num_msi_desired, rv);

        if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
                int i;

                sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
                sc->tasklet_entries[HIF_MAX_TASKLET_NUM - 1].hif_handler =
                        (void *)sc;
                sc->tasklet_entries[HIF_MAX_TASKLET_NUM - 1].id =
                        HIF_MAX_TASKLET_NUM;
                tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
                             (unsigned long)&sc->tasklet_entries[
                                     HIF_MAX_TASKLET_NUM - 1]);
                ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
                                  hif_pci_msi_fw_handler,
                                  IRQF_SHARED, "wlan_pci", sc);
                if (ret) {
                        HIF_ERROR("%s: request_irq failed", __func__);
                        goto err_intr;
                }
                for (i = 0; i <= scn->ce_count; i++) {
                        sc->tasklet_entries[i].hif_handler = (void *)sc;
                        sc->tasklet_entries[i].id = i;
                        tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
                                     (unsigned long)&sc->tasklet_entries[i]);
                        ret = request_irq((sc->pdev->irq +
                                           i + MSI_ASSIGN_CE_INITIAL),
                                          ce_per_engine_handler, IRQF_SHARED,
                                          "wlan_pci", sc);
                        if (ret) {
                                HIF_ERROR("%s: request_irq failed", __func__);
                                goto err_intr;
                        }
                }
        } else if (rv > 0) {
                HIF_TRACE("%s: use single msi", __func__);

                if ((ret = pci_enable_msi(sc->pdev)) < 0) {
                        HIF_ERROR("%s: single MSI allocation failed",
                                  __func__);
                        /* Try for legacy PCI line interrupts */
                        sc->num_msi_intrs = 0;
                } else {
                        sc->num_msi_intrs = 1;
                        tasklet_init(&sc->intr_tq,
                                     wlan_tasklet, (unsigned long)sc);
                        ret = request_irq(sc->pdev->irq,
                                          hif_pci_interrupt_handler,
                                          IRQF_SHARED, "wlan_pci", sc);
                        if (ret) {
                                HIF_ERROR("%s: request_irq failed", __func__);
                                goto err_intr;
                        }
                }
        } else {
                sc->num_msi_intrs = 0;
                ret = -EIO;
                HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
        }
        if ((ret = pci_enable_msi(sc->pdev)) < 0) {
                HIF_ERROR("%s: single MSI interrupt allocation failed",
                          __func__);
                /* Try for legacy PCI line interrupts */
                sc->num_msi_intrs = 0;
        } else {
                sc->num_msi_intrs = 1;
                tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
                ret = request_irq(sc->pdev->irq,
                                  hif_pci_interrupt_handler, IRQF_SHARED,
                                  "wlan_pci", sc);
                if (ret) {
                        HIF_ERROR("%s: request_irq failed", __func__);
                        goto err_intr;
                }
        }

        if (ret == 0) {
                hif_write32_mb(sc->mem + (SOC_CORE_BASE_ADDRESS |
                                          PCIE_INTR_ENABLE_ADDRESS),
                               HOST_GROUP0_MASK);
                hif_write32_mb(sc->mem +
                               PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_RESET);
        }
        HIF_TRACE("%s: X, ret = %d", __func__, ret);

        return ret;

err_intr:
        if (sc->num_msi_intrs >= 1)
                pci_disable_msi(sc->pdev);
        return ret;
}

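/**
 * hif_pci_configure_legacy_irq() - set up legacy PCI line interrupts
 * @sc: hif pci context
 *
 * Used when MSI is unsupported or MSI setup failed.
 *
 * Return: 0 on success, negative errno on failure
 */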
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
        int ret = 0;
        struct ol_softc *scn = sc->ol_sc;

        HIF_TRACE("%s: E", __func__);

        /* MSI is not supported, or MSI IRQ setup failed */
        tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
        ret = request_irq(sc->pdev->irq,
                          hif_pci_interrupt_handler, IRQF_SHARED,
                          "wlan_pci", sc);
        if (ret) {
                HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
                goto end;
        }
        /* Use Legacy PCI Interrupts */
        hif_write32_mb(sc->mem + (SOC_CORE_BASE_ADDRESS |
                                  PCIE_INTR_ENABLE_ADDRESS),
                       HOST_GROUP0_MASK);
        hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                       PCIE_SOC_WAKE_ADDRESS,
                       PCIE_SOC_WAKE_RESET);
end:
        CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR,
                  "%s: X, ret = %d", __func__, ret);
        return ret;
}

/**
 * hif_nointrs(): disable IRQ
 * @scn: struct ol_softc
 *
 * This function stops interrupt(s)
 *
 * Return: none
 */
void hif_nointrs(struct ol_softc *scn)
{
        int i;
        struct hif_pci_softc *sc = scn->hif_sc;

        if (scn->request_irq_done == false)
                return;
        if (sc->num_msi_intrs > 0) {
                /* MSI interrupt(s) */
                for (i = 0; i < sc->num_msi_intrs; i++) {
                        free_irq(sc->pdev->irq + i, sc);
                }
                sc->num_msi_intrs = 0;
        } else {
                /* Legacy PCI line interrupt */
                free_irq(sc->pdev->irq, sc);
        }
        ce_unregister_irq(scn->hif_hdl, 0xfff);
        scn->request_irq_done = false;
}

/**
 * hif_disable_bus(): hif_disable_bus
 * @bdev: bus dev
 *
 * This function disables the bus
 *
 * Return: none
 */
void hif_disable_bus(void *bdev)
{
        struct pci_dev *pdev = bdev;
        struct hif_pci_softc *sc = pci_get_drvdata(pdev);
        struct ol_softc *scn;
        void __iomem *mem;

        /* Attach did not succeed, all resources have been
         * freed in error handler
         */
        if (!sc)
                return;

        scn = sc->ol_sc;

        if (ADRASTEA_BU) {
                hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
                hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
                               HOST_GROUP0_MASK);
        }

        mem = (void __iomem *)sc->mem;
        if (mem) {
                pci_disable_msi(pdev);
                hif_dump_pipe_debug_count(scn);
                hif_deinit_cdf_ctx(scn);
                if (scn->athdiag_procfs_inited) {
                        athdiag_procfs_remove();
                        scn->athdiag_procfs_inited = false;
                }
                pci_set_drvdata(pdev, NULL);
                pci_iounmap(pdev, mem);
                scn->mem = NULL;
                pci_release_region(pdev, BAR_NUM);
                pci_clear_master(pdev);
                pci_disable_device(pdev);
        }
        HIF_INFO("%s: X", __func__);
}

#define OL_ATH_PCI_PM_CONTROL 0x44

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend
 * @scn: hif context
 * @flag: prevent linkdown if true, otherwise allow
 *
 * this api should only be called as part of bus prevent linkdown
 */
static void hif_runtime_prevent_linkdown(struct ol_softc *scn, bool flag)
{
        struct hif_pci_softc *sc = scn->hif_sc;

        if (flag)
                hif_pm_runtime_prevent_suspend(scn, sc->prevent_linkdown_lock);
        else
                hif_pm_runtime_allow_suspend(scn, sc->prevent_linkdown_lock);
}
#else
static void hif_runtime_prevent_linkdown(struct ol_softc *scn, bool flag)
{
}
#endif

#if defined(CONFIG_CNSS) && defined(CONFIG_PCI_MSM)
/**
 * hif_bus_prevent_linkdown(): prevent or allow linkdown
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_bus_prevent_linkdown(struct ol_softc *scn, bool flag)
{
        HIF_ERROR("wlan: %s pcie power collapse",
                  (flag ? "disable" : "enable"));
        hif_runtime_prevent_linkdown(scn, flag);
        cnss_wlan_pm_control(flag);
}
#else
void hif_bus_prevent_linkdown(struct ol_softc *scn, bool flag)
{
        HIF_ERROR("wlan: %s pcie power collapse",
                  (flag ? "disable" : "enable"));
        hif_runtime_prevent_linkdown(scn, flag);
}
#endif

/**
 * hif_drain_tasklets(): wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *         -EFAULT if it times out.
 */
static inline int hif_drain_tasklets(struct ol_softc *scn)
{
        uint32_t ce_drain_wait_cnt = 0;

        while (cdf_atomic_read(&scn->active_tasklet_cnt)) {
                if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
                        HIF_ERROR("%s: CE still not done with access",
                                  __func__);

                        return -EFAULT;
                }
                HIF_INFO("%s: Waiting for CE to finish access", __func__);
                msleep(10);
        }
        return 0;
}

/**
 * hif_bus_suspend_link_up() - suspend the bus
 *
 * Configures the pci irq line as a wakeup source.
 *
 * Return: 0 for success and non-zero for failure
 */
static int hif_bus_suspend_link_up(void)
{
        struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
        struct pci_dev *pdev;
        int status;

        if (!scn)
                return -EFAULT;

        pdev = scn->aps_osdev.bdev;

        status = hif_drain_tasklets(scn);
        if (status != 0)
                return status;

        if (unlikely(enable_irq_wake(pdev->irq))) {
                HIF_ERROR("%s: Fail to enable wake IRQ!", __func__);
                return -EINVAL;
        }

        return 0;
}

/**
 * hif_bus_resume_link_up() - hif bus resume API
 *
 * This function disables the wakeup source.
 *
 * Return: 0 for success and non-zero for failure
 */
static int hif_bus_resume_link_up(void)
{
        struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
        struct pci_dev *pdev;

        if (!scn)
                return -EFAULT;

        pdev = scn->aps_osdev.bdev;
        if (!pdev) {
                HIF_ERROR("%s: pci_dev is null", __func__);
                return -EFAULT;
        }

        if (unlikely(disable_irq_wake(pdev->irq))) {
                HIF_ERROR("%s: Fail to disable wake IRQ!", __func__);
                return -EFAULT;
        }

        return 0;
}

/**
 * hif_bus_suspend_link_down() - suspend the bus
 *
 * Suspends the hif layer, taking care of draining receive queues and
 * shutting down copy engines if needed. Ensures copy engine interrupts
 * are disabled when it returns. Prevents register access after it
 * returns.
 *
 * Return: 0 for success and non-zero for failure
 */
static int hif_bus_suspend_link_down(void)
{
        struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
        struct pci_dev *pdev;
        struct HIF_CE_state *hif_state;
        int status = 0;

        if (!scn)
                return -EFAULT;

        pdev = scn->aps_osdev.bdev;

        hif_state = (struct HIF_CE_state *)scn->hif_hdl;
        if (!hif_state) {
                HIF_ERROR("%s: hif_state is null", __func__);
                return -EFAULT;
        }

        disable_irq(pdev->irq);

        status = hif_drain_tasklets(scn);
        if (status != 0) {
                enable_irq(pdev->irq);
                return status;
        }

        /* Stop the HIF Sleep Timer */
        hif_cancel_deferred_target_sleep(scn);

        cdf_atomic_set(&scn->link_suspended, 1);

        return 0;
}

/**
 * hif_bus_resume_link_down() - hif bus resume API
 *
 * This function resumes the bus, re-enabling interrupts.
 *
 * Return: 0 for success and non-zero for failure
 */
static int hif_bus_resume_link_down(void)
{
        struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
        struct pci_dev *pdev;

        if (!scn)
                return -EFAULT;

        pdev = scn->aps_osdev.bdev;
        if (!pdev) {
                HIF_ERROR("%s: pci_dev is null", __func__);
                return -EFAULT;
        }

        cdf_atomic_set(&scn->link_suspended, 0);

        enable_irq(pdev->irq);

        return 0;
}
1891
1892/**
1893 * hif_bus_suspend(): prepare hif for suspend
Houston Hoffman1688fba2015-11-10 16:47:27 -08001894 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001895 * chose suspend type based on link suspend voting.
1896 *
Houston Hoffman1688fba2015-11-10 16:47:27 -08001897 * Return: 0 for success and non-zero error code for failure
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001898 */
1899int hif_bus_suspend(void)
1900{
1901 if (hif_can_suspend_link())
1902 return hif_bus_suspend_link_down();
1903 else
1904 return hif_bus_suspend_link_up();
1905}
1906
1907/**
Houston Hoffman1688fba2015-11-10 16:47:27 -08001908 * hif_bus_resume(): prepare hif for resume
1909 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001910 * Chooses the resume type based on link suspend voting.
1911 *
Houston Hoffman1688fba2015-11-10 16:47:27 -08001912 * Return: 0 for success and non-zero error code for failure
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001913 */
1914int hif_bus_resume(void)
1915{
1916 if (hif_can_suspend_link())
1917 return hif_bus_resume_link_down();
1918 else
1919 return hif_bus_resume_link_up();
1920}
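
/*
 * Usage sketch (not part of this file): a platform suspend hook would
 * simply call the pair above; wlan_pm_suspend()/wlan_pm_resume() are
 * hypothetical names.
 *
 * static int wlan_pm_suspend(void)
 * {
 *	return hif_bus_suspend();
 * }
 *
 * static int wlan_pm_resume(void)
 * {
 *	return hif_bus_resume();
 * }
 */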
1921
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001922#ifdef FEATURE_RUNTIME_PM
1923/**
1924 * __hif_runtime_pm_set_state(): utility function
1925 * @state: state to set
1926 *
1927 * Atomically sets the runtime pm state.
1928 */
1929static void __hif_runtime_pm_set_state(enum hif_pm_runtime_state state)
1930{
1931 struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
1932 struct hif_pci_softc *sc;
1933
1934 if (NULL == scn) {
1935 HIF_ERROR("%s: HIF_CTX not initialized",
1936 __func__);
1937 return;
1938 }
1939
1940 sc = scn->hif_sc;
1941 cdf_atomic_set(&sc->pm_state, state);
1942
1943}
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001944#endif
1945
Houston Hoffman78467a82016-01-05 20:08:56 -08001946#ifdef FEATURE_RUNTIME_PM
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001947/**
1948 * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
1949 *
1950 * Notify hif that a runtime pm operation has started
1951 */
Houston Hoffman78467a82016-01-05 20:08:56 -08001952static void hif_runtime_pm_set_state_inprogress(void)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001953{
1954 __hif_runtime_pm_set_state(HIF_PM_RUNTIME_STATE_INPROGRESS);
1955}
1956
1957/**
1958 * hif_runtime_pm_set_state_on(): adjust runtime pm state
1959 *
1960 * Notify hif that the runtime pm state should be on
1961 */
Houston Hoffman78467a82016-01-05 20:08:56 -08001962static void hif_runtime_pm_set_state_on(void)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001963{
1964 __hif_runtime_pm_set_state(HIF_PM_RUNTIME_STATE_ON);
1965}
1966
1967/**
1968 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
1969 *
1970 * Notify hif that a runtime suspend attempt has been completed successfully
1971 */
Houston Hoffman78467a82016-01-05 20:08:56 -08001972static void hif_runtime_pm_set_state_suspended(void)
Houston Hoffmanf2ff37a2015-11-03 11:33:36 -08001973{
1974 __hif_runtime_pm_set_state(HIF_PM_RUNTIME_STATE_SUSPENDED);
1975}
1976
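/**
 * get_sc() - fetch the pci context from the global HIF context
 *
 * Return: the hif_pci_softc pointer, or NULL if HIF is not initialized
 */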
Houston Hoffman692cc052015-11-10 18:42:47 -08001977static inline struct hif_pci_softc *get_sc(void)
1978{
1979 struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
1980
1981 if (NULL == scn) {
1982 HIF_ERROR("%s: scn is null", __func__);
1984 return NULL;
1985 }
1986
1987 return scn->hif_sc;
1988}
1989
1990/**
1991 * hif_log_runtime_suspend_success() - log a successful runtime suspend
1992 */
Houston Hoffman78467a82016-01-05 20:08:56 -08001993static void hif_log_runtime_suspend_success(void)
Houston Hoffman692cc052015-11-10 18:42:47 -08001994{
1995 struct hif_pci_softc *sc = get_sc();
1996 if (sc == NULL)
1997 return;
1998
1999 sc->pm_stats.suspended++;
2000 sc->pm_stats.suspend_jiffies = jiffies;
2001}
2002
2003/**
2004 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2005 *
2006 * log a failed runtime suspend
2007 * (the caller marks last busy to prevent an immediate runtime suspend)
2008 */
Houston Hoffman78467a82016-01-05 20:08:56 -08002009static void hif_log_runtime_suspend_failure(void)
Houston Hoffman692cc052015-11-10 18:42:47 -08002010{
2011 struct hif_pci_softc *sc = get_sc();
2012 if (sc == NULL)
2013 return;
2014
2015 sc->pm_stats.suspend_err++;
Houston Hoffman692cc052015-11-10 18:42:47 -08002016}
2017
2018/**
2019 * hif_log_runtime_resume_success() - log a successful runtime resume
2020 *
2021 * log a successful runtime resume
2022 * (the caller marks last busy to prevent an immediate runtime suspend)
2023 */
Houston Hoffman78467a82016-01-05 20:08:56 -08002024static void hif_log_runtime_resume_success(void)
Houston Hoffman692cc052015-11-10 18:42:47 -08002025{
2026 struct hif_pci_softc *sc = get_sc();
2027 if (sc == NULL)
2028 return;
2029
2030 sc->pm_stats.resumed++;
Houston Hoffman78467a82016-01-05 20:08:56 -08002031}
2032
2033/**
2034 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2035 *
2036 * Record the failure.
2037 * mark last busy to delay a retry.
2038 * adjust the runtime_pm state.
2039 */
2040void hif_process_runtime_suspend_failure(void)
2041{
2042 struct hif_pci_softc *sc = get_sc();
2043
2044 hif_log_runtime_suspend_failure();
2045 if (sc != NULL)
2046 hif_pm_runtime_mark_last_busy(sc->dev);
2047 hif_runtime_pm_set_state_on();
2048}
2049
2050/**
2051 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2052 *
2053 * Makes sure that the pci link will be taken down by the suspend operation.
2054 * If the hif layer is configured to leave the bus on, runtime suspend will
2055 * not save any power.
2056 *
2057 * Set the runtime suspend state to in progress.
2058 *
2059 * Return: -EINVAL if the bus won't go down, otherwise 0
2060 */
2061int hif_pre_runtime_suspend(void)
2062{
2063 if (!hif_can_suspend_link()) {
2064 HIF_ERROR("Runtime PM not supported for link up suspend");
2065 return -EINVAL;
2066 }
2067
2068 hif_runtime_pm_set_state_inprogress();
2069 return 0;
2070}
2071
2072/**
2073 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2074 *
2075 * Record the success.
2076 * adjust the runtime_pm state
2077 */
2078void hif_process_runtime_suspend_success(void)
2079{
2080 hif_runtime_pm_set_state_suspended();
2081 hif_log_runtime_suspend_success();
2082}
2083
2084/**
2085 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2086 *
2087 * update the runtime pm state.
2088 */
2089void hif_pre_runtime_resume(void)
2090{
2091 hif_runtime_pm_set_state_inprogress();
2092}
2093
2094/**
2095 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2096 *
2097 * record the success.
2098 * adjust the runtime_pm state
2099 */
2100void hif_process_runtime_resume_success(void)
2101{
2102 struct hif_pci_softc *sc = get_sc();
2103
2104 hif_log_runtime_resume_success();
2105 if (sc != NULL)
2106 hif_pm_runtime_mark_last_busy(sc->dev);
2107 hif_runtime_pm_set_state_on();
Houston Hoffman692cc052015-11-10 18:42:47 -08002108}
2109#endif
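
/*
 * Flow sketch (assumes a hypothetical platform callback, not the actual
 * registration code): the bookkeeping hooks above bracket the bus
 * operation so that pm_state always reflects the outcome.
 *
 * static int wlan_runtime_suspend_cb(void)
 * {
 *	int ret = hif_pre_runtime_suspend();
 *
 *	if (ret)
 *		return ret;
 *
 *	ret = hif_runtime_suspend();
 *	if (ret)
 *		hif_process_runtime_suspend_failure();
 *	else
 *		hif_process_runtime_suspend_success();
 *	return ret;
 * }
 */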
2110
Houston Hoffman1688fba2015-11-10 16:47:27 -08002111/**
2112 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2113 *
2114 * Return: 0 for success and non-zero error code for failure
2115 */
2116int hif_runtime_suspend(void)
2117{
2118 return hif_bus_suspend();
2119}
2120
Houston Hoffmanf4607852015-12-17 17:14:40 -08002121#ifdef WLAN_FEATURE_FASTPATH
2122/**
2123 * hif_fastpath_resume() - resume fastpath for runtime pm
2124 *
2125 * ensure that the fastpath write index register is up to date
2126 * since runtime pm may cause ce_send_fast to skip the register
2127 * write.
2128 */
2129static void hif_fastpath_resume(void)
2130{
2131 struct ol_softc *scn =
2132 (struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
2133 struct CE_state *ce_state;
2134
2135 if (!scn)
2136 return;
2137
2138 if (scn->fastpath_mode_on) {
2139 if (Q_TARGET_ACCESS_BEGIN(scn)) {
2140 ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2141 cdf_spin_lock_bh(&ce_state->ce_index_lock);
2142
2143 /* war_ce_src_ring_write_idx_set */
2144 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2145 ce_state->src_ring->write_index);
2146 cdf_spin_unlock_bh(&ce_state->ce_index_lock);
2147 Q_TARGET_ACCESS_END(scn);
2148 }
2149 }
2150}
2151#else
2152static void hif_fastpath_resume(void) {}
2153#endif
2154
2155
Houston Hoffman1688fba2015-11-10 16:47:27 -08002156/**
2157 * hif_runtime_resume() - do the bus resume part of a runtime resume
2158 *
2159 * Return: 0 for success and non-zero error code for failure
2160 */
2161int hif_runtime_resume(void)
2162{
Houston Hoffmanf4607852015-12-17 17:14:40 -08002163 int status = hif_bus_resume();
2164
2165 hif_fastpath_resume();
2166
2167 return status;
Houston Hoffman1688fba2015-11-10 16:47:27 -08002168}
2169
Komal Seelamaa72bb72016-02-01 17:22:50 +05302170#if CONFIG_PCIE_64BIT_MSI
2171static void hif_free_msi_ctx(struct ol_softc *scn)
2172{
2173 struct hif_pci_softc *sc = scn->hif_sc;
2174 struct hif_msi_info *info = &sc->msi_info;
2175
2176 OS_FREE_CONSISTENT(&scn->aps_osdev, 4,
2177 info->magic, info->magic_dma,
2178 OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2179 info->magic = NULL;
2180 info->magic_dma = 0;
2181}
2182#else
2183static void hif_free_msi_ctx(struct ol_softc *scn)
2184{
2185}
2186#endif
2187
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002188void hif_disable_isr(void *ol_sc)
2189{
2190 struct ol_softc *scn = (struct ol_softc *)ol_sc;
2191 struct hif_pci_softc *sc = scn->hif_sc;
2192
2193 hif_nointrs(ol_sc);
Komal Seelamaa72bb72016-02-01 17:22:50 +05302194 hif_free_msi_ctx(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002195 /* Cancel the pending tasklet */
2196 ce_tasklet_kill(scn->hif_hdl);
2197 tasklet_kill(&sc->intr_tq);
2198 cdf_atomic_set(&scn->active_tasklet_cnt, 0);
2199}
2200
2201/* Function to reset SoC */
2202void hif_reset_soc(void *ol_sc)
2203{
2204 struct ol_softc *scn = (struct ol_softc *)ol_sc;
2205 struct hif_pci_softc *sc = scn->hif_sc;
Komal Seelam91553ce2016-01-27 18:57:10 +05302206 struct hif_target_info *tgt_info = hif_get_target_info_handle(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002207
2208#if defined(CPU_WARM_RESET_WAR)
2209 /* Currently CPU warm reset sequence is tested only for AR9888_REV2
2210 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2211 * verified for AR9888_REV1
2212 */
Komal Seelam91553ce2016-01-27 18:57:10 +05302213 if (tgt_info->target_version == AR9888_REV2_VERSION)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002214 hif_pci_device_warm_reset(sc);
Komal Seelam91553ce2016-01-27 18:57:10 +05302215 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002216 hif_pci_device_reset(sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002217#else
2218 hif_pci_device_reset(sc);
2219#endif
2220}
2221
2222void hif_disable_aspm(void)
2223{
2224 struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
2225 struct hif_pci_softc *sc;
2226
2227 if (NULL == scn) {
2228 HIF_ERROR("%s: Could not disable ASPM scn is null",
2229 __func__);
2230 return;
2231 }
2232
2233 sc = scn->hif_sc;
2234
2235 /* Disable ASPM when pkt log is enabled */
Komal Seelamaa72bb72016-02-01 17:22:50 +05302236 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
2237 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002238}
2239
2240/**
2241 * hif_enable_power_gating(): enable HW power gating
2242 *
2243 * This function enables HW power gating
2244 *
2245 * Return: none
2246 */
2247void hif_enable_power_gating(void *hif_ctx)
2248{
2249 struct ol_softc *scn = hif_ctx;
2250 struct hif_pci_softc *sc;
2251
2252 if (NULL == scn) {
2253 HIF_ERROR("%s: Could not enable power gating, scn is null",
2254 __func__);
2255 return;
2256 }
2257 sc = scn->hif_sc;
2258
2259 /* Re-enable ASPM after firmware/OTP download is complete */
Komal Seelamaa72bb72016-02-01 17:22:50 +05302260 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002261}
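
/*
 * Pairing sketch (hypothetical call site): ASPM is disabled before the
 * firmware/OTP download and power gating is re-enabled once it is done.
 *
 * hif_disable_aspm();
 * ret = do_fw_download(scn);	\/\* hypothetical download helper *\/
 * hif_enable_power_gating(scn);
 */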
2262
2263#ifdef CONFIG_PCI_MSM
2264static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2265{
2266 msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2267 msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2268}
2269#else
2270static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
2271#endif
2272
2273/*
2274 * For now, we use simple on-demand sleep/wake.
2275 * Some possible improvements:
2276 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2277 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2278 * Careful, though, these functions may be used by
2279 * interrupt handlers ("atomic")
2280 * -Don't use host_reg_table for this code; instead use values directly
2281 * -Use a separate timer to track activity and allow Target to sleep only
2282 * if it hasn't done anything for a while; may even want to delay some
2283 * processing for a short while in order to "batch" (e.g.) transmit
2284 * requests with completion processing into "windows of up time". Costs
2285 * some performance, but improves power utilization.
2286 * -On some platforms, it might be possible to eliminate explicit
2287 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
2288 * recover from the failure by forcing the Target awake.
2289 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
2290 * overhead in some cases. Perhaps this makes more sense when
2291 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2292 * disabled.
2293 * -It is possible to compile this code out and simply force the Target
2294 * to remain awake. That would yield optimal performance at the cost of
2295 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2296 *
2297 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2298 */
2299/**
2300 * hif_target_sleep_state_adjust() - on-demand sleep/wake
2301 * @scn: ol_softc pointer.
2302 * @sleep_ok: allow the Target to go to sleep
2303 * @wait_for_it: when waking, poll until the Target is verified awake
2304 *
2305 * On-demand sleep/wake: allow the Target to sleep once the keep-awake
2306 * count drops to zero, or force (and optionally verify) a wakeup.
2307 * Return: 0 on success, negative error code on failure
2308 */
2309#if ((CONFIG_ATH_PCIE_MAX_PERF == 0) && CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD)
2310int
2311hif_target_sleep_state_adjust(struct ol_softc *scn,
2312 bool sleep_ok, bool wait_for_it)
2313{
2314 struct HIF_CE_state *hif_state = scn->hif_hdl;
2315 A_target_id_t pci_addr = scn->mem;
2316 static int max_delay;
2317 struct hif_pci_softc *sc = scn->hif_sc;
2318 static int debug;
Komal Seelam91553ce2016-01-27 18:57:10 +05302319 struct hif_config_info *cfg = hif_get_ini_handle(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002320
2321 if (scn->recovery)
2322 return -EACCES;
2323
2324 if (cdf_atomic_read(&scn->link_suspended)) {
2325 HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
2326 debug = true;
2327 CDF_ASSERT(0);
2328 return -EACCES;
2329 }
2330
2331 if (debug) {
2332 wait_for_it = true;
2333 HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
2334 __func__);
2335 CDF_ASSERT(0);
2336 }
2337
2338 if (sleep_ok) {
2339 cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2340 hif_state->keep_awake_count--;
2341 if (hif_state->keep_awake_count == 0) {
2342 /* Allow sleep */
2343 hif_state->verified_awake = false;
2344 hif_state->sleep_ticks = cdf_system_ticks();
2345 }
2346 if (hif_state->fake_sleep == false) {
2347 /* Set the Fake Sleep */
2348 hif_state->fake_sleep = true;
2349
2350 /* Start the Sleep Timer */
2351 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
2352 cdf_softirq_timer_start(&hif_state->sleep_timer,
2353 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2354 }
2355 cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2356 } else {
2357 cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2358
2359 if (hif_state->fake_sleep) {
2360 hif_state->verified_awake = true;
2361 } else {
2362 if (hif_state->keep_awake_count == 0) {
2363 /* Force AWAKE */
2364 hif_write32_mb(pci_addr +
2365 PCIE_LOCAL_BASE_ADDRESS +
2366 PCIE_SOC_WAKE_ADDRESS,
2367 PCIE_SOC_WAKE_V_MASK);
2368 }
2369 }
2370 hif_state->keep_awake_count++;
2371 cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2372
2373 if (wait_for_it && !hif_state->verified_awake) {
2374#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8 ms */
2375 int tot_delay = 0;
2376 int curr_delay = 5;
2377
2378 for (;; ) {
2379 if (hif_targ_is_awake(scn, pci_addr)) {
2380 hif_state->verified_awake = true;
2381 break;
2382 } else
2383 if (!hif_pci_targ_is_present
2384 (scn, pci_addr)) {
2385 break;
2386 }
2387 if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT) {
2388 uint16_t val;
2389 uint32_t bar;
2390
2391 HIF_ERROR("%s: keep_awake_count = %d",
2392 __func__,
2393 hif_state->keep_awake_count);
2394
2395 pci_read_config_word(sc->pdev,
2396 PCI_VENDOR_ID,
2397 &val);
2398 HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
2399 __func__, val);
2400
2401 pci_read_config_word(sc->pdev,
2402 PCI_DEVICE_ID,
2403 &val);
2404 HIF_ERROR("%s: PCI Device ID = 0x%04x",
2405 __func__, val);
2406
2407 pci_read_config_word(sc->pdev,
2408 PCI_COMMAND, &val);
2409 HIF_ERROR("%s: PCI Command = 0x%04x",
2410 __func__, val);
2411
2412 pci_read_config_word(sc->pdev,
2413 PCI_STATUS, &val);
2414 HIF_ERROR("%s: PCI Status = 0x%04x",
2415 __func__, val);
2416
2417 pci_read_config_dword(sc->pdev,
2418 PCI_BASE_ADDRESS_0, &bar);
2419 HIF_ERROR("%s: PCI BAR 0 = 0x%08x",
2420 __func__, bar);
2421
2422 HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x",
2423 __func__,
2424 hif_read32_mb(pci_addr +
2425 PCIE_LOCAL_BASE_ADDRESS
2426 + PCIE_SOC_WAKE_ADDRESS));
2427 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x",
2428 __func__,
2429 hif_read32_mb(pci_addr +
2430 PCIE_LOCAL_BASE_ADDRESS
2431 + RTC_STATE_ADDRESS));
2432
2433 HIF_ERROR("%s: error, failed to wake up target",
2434 __func__);
2435 hif_msm_pcie_debug_info(sc);
Komal Seelam91553ce2016-01-27 18:57:10 +05302436 if (!cfg->enable_self_recovery)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002437 CDF_BUG(0);
2438 scn->recovery = true;
Prashanth Bhattadfcae6b2015-12-04 11:56:47 -08002439 cds_set_recovery_in_progress(true);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002440 cnss_wlan_pci_link_down();
2441 return -EACCES;
2442 }
2443
2444 OS_DELAY(curr_delay);
2445 tot_delay += curr_delay;
2446
2447 if (curr_delay < 50)
2448 curr_delay += 5;
2449 }
2450
2451 /*
2452 * NB: If Target has to come out of Deep Sleep,
2453 * this may take a few msecs. Typically, though,
2454 * this delay should be <30us.
2455 */
2456 if (tot_delay > max_delay)
2457 max_delay = tot_delay;
2458 }
2459 }
2460
2461 if (debug && hif_state->verified_awake) {
2462 debug = 0;
2463 HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2464 __func__,
2465 hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
2466 PCIE_INTR_ENABLE_ADDRESS),
2467 hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
2468 PCIE_INTR_CAUSE_ADDRESS),
2469 hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
2470 CPU_INTR_ADDRESS),
2471 hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
2472 PCIE_INTR_CLR_ADDRESS),
2473 hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
2474 CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2475 }
2476
2477 return 0;
2478}
2479#else
2480inline int
2481hif_target_sleep_state_adjust(struct ol_softc *scn,
2482 bool sleep_ok, bool wait_for_it)
2483{
2484 return 0;
2485}
2486#endif
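
/*
 * Bracketing sketch (assuming a valid scn and a caller-chosen register
 * offset): force a verified wakeup, access the register, then allow
 * sleep again.
 *
 * if (hif_target_sleep_state_adjust(scn, false, true) == 0) {
 *	val = hif_read32_mb(scn->mem + offset);
 *	hif_target_sleep_state_adjust(scn, true, false);
 * }
 */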
2487
2488#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2489uint32_t hif_target_read_checked(struct ol_softc *scn, uint32_t offset)
2490{
2491 uint32_t value;
2492 void *addr;
2493
2494 if (!A_TARGET_ACCESS_OK(scn))
2495 hi_fdebug();
2496
2497 addr = scn->mem + offset;
2498 value = A_PCI_READ32(addr);
2499
2500 {
2501 unsigned long irq_flags;
2502 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2503
2504 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2505 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2506 pcie_access_log[idx].is_write = false;
2507 pcie_access_log[idx].addr = addr;
2508 pcie_access_log[idx].value = value;
2509 pcie_access_log_seqnum++;
2510 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2511 }
2512
2513 return value;
2514}
2515
2516void
2517hif_target_write_checked(struct ol_softc *scn, uint32_t offset, uint32_t value)
2518{
2519 void *addr;
2520
2521 if (!A_TARGET_ACCESS_OK(scn))
2522 hi_fdebug();
2523
2524 addr = scn->mem + (offset);
2525 hif_write32_mb(addr, value);
2526
2527 {
2528 unsigned long irq_flags;
2529 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2530
2531 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2532 pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2533 pcie_access_log[idx].is_write = true;
2534 pcie_access_log[idx].addr = addr;
2535 pcie_access_log[idx].value = value;
2536 pcie_access_log_seqnum++;
2537 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2538 }
2539}
2540
2541/**
2542 * hi_fdebug() - not needed in PCI
2543 *
2544 *
2545 * Return: n/a
2546 */
2547void hi_fdebug(void)
2548{
2549 /* BUG_ON(1); */
2550}
2551
2552/**
2553 * hif_target_dump_access_log() - dump access log
2554 *
2555 * dump access log
2556 *
2557 * Return: n/a
2558 */
2559void hif_target_dump_access_log(void)
2560{
2561 int idx, len, start_idx, cur_idx;
2562 unsigned long irq_flags;
2563
2564 spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2565 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2566 len = PCIE_ACCESS_LOG_NUM;
2567 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2568 } else {
2569 len = pcie_access_log_seqnum;
2570 start_idx = 0;
2571 }
2572
2573 for (idx = 0; idx < len; idx++) {
2574 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2575 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%p val:%u.",
2576 __func__, idx,
2577 pcie_access_log[cur_idx].seqnum,
2578 pcie_access_log[cur_idx].is_write,
2579 pcie_access_log[cur_idx].addr,
2580 pcie_access_log[cur_idx].value);
2581 }
2582
2583 pcie_access_log_seqnum = 0;
2584 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2585}
2586#endif
2587
2588/**
2589 * war_pci_write32() - PCIe io32 write workaround
2590 * @addr: mapped base address
2591 * @offset: register offset from @addr
2592 * @value: value to write
2593 *
2594 * When hif_pci_war1 is set, issues three dummy reads before the
2595 * iowrite32; otherwise performs a plain iowrite32.
2596 * Return: none
2597 */
2598void war_pci_write32(char *addr, uint32_t offset, uint32_t value)
2599{
2600 if (hif_pci_war1) {
2601 unsigned long irq_flags;
2602
2603 spin_lock_irqsave(&pciwar_lock, irq_flags);
2604
2605 (void)ioread32((void __iomem *)(addr + offset + 4));
2606 (void)ioread32((void __iomem *)(addr + offset + 4));
2607 (void)ioread32((void __iomem *)(addr + offset + 4));
2608 iowrite32((uint32_t) (value), (void __iomem *)(addr + offset));
2609
2610 spin_unlock_irqrestore(&pciwar_lock, irq_flags);
2611 } else {
2612 iowrite32((uint32_t) (value), (void __iomem *)(addr + offset));
2613 }
2614}
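
/*
 * Usage sketch (hypothetical mask value): callers pass the mapped BAR
 * and a register offset; the extra-read WAR is applied automatically
 * whenever hif_pci_war1 is set.
 *
 * war_pci_write32(scn->mem, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
 *		   mask);
 */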
2615
2616/**
2617 * hif_configure_irq(): configure interrupt
2618 *
2619 * This function configures interrupt(s)
2620 *
2621 * @sc: PCIe control struct
2622 *
2623 * Return: 0 for success, negative error code on failure
2625 */
2626int hif_configure_irq(struct hif_pci_softc *sc)
2627{
2628 int ret = 0;
2629 struct ol_softc *scn = sc->ol_sc;
2630
2631 HIF_TRACE("%s: E", __func__);
2632
Komal Seelamaa72bb72016-02-01 17:22:50 +05302633 hif_init_reschedule_tasklet_work(sc);
2634
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002635 if (ENABLE_MSI) {
2636 ret = hif_configure_msi(sc);
2637 if (ret == 0)
2638 goto end;
2639 }
2640 /* MSI failed. Try legacy irq */
2641 ret = hif_pci_configure_legacy_irq(sc);
2642 if (ret < 0) {
2643 HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
2644 __func__, ret);
2645 return ret;
2646 }
2647end:
2648 scn->request_irq_done = true;
2649 return 0;
2650}
2651
2652/**
2653 * hif_target_sync() : ensure the target is ready
2654 * @scn: hif control structure
2655 *
2656 * Informs fw that we plan to use legacy interrupts so that
2657 * it can begin booting. Ensures that the fw finishes booting
2658 * before continuing. Should be called before trying to write
2659 * to the target's other registers for the first time.
2660 *
2661 * Return: none
2662 */
2663void hif_target_sync(struct ol_softc *scn)
2664{
2665 hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
2666 PCIE_INTR_ENABLE_ADDRESS),
2667 PCIE_INTR_FIRMWARE_MASK);
2668
2669 hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2670 PCIE_SOC_WAKE_ADDRESS,
2671 PCIE_SOC_WAKE_V_MASK);
2672 while (!hif_targ_is_awake(scn, scn->mem))
2673 ;
2674
2675 if (HAS_FW_INDICATOR) {
2676 int wait_limit = 500;
2677 int fw_ind = 0;
2678 HIF_TRACE("%s: Loop checking FW signal", __func__);
2679 while (1) {
2680 fw_ind = hif_read32_mb(scn->hif_sc->mem +
2681 FW_INDICATOR_ADDRESS);
2682 if (fw_ind & FW_IND_INITIALIZED)
2683 break;
2684 if (wait_limit-- < 0)
2685 break;
2686 hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
2687 PCIE_INTR_ENABLE_ADDRESS),
2688 PCIE_INTR_FIRMWARE_MASK);
2689
2690 cdf_mdelay(10);
2691 }
2692 if (wait_limit < 0)
2693 HIF_TRACE("%s: FW signal timed out",
2694 __func__);
2695 else
2696 HIF_TRACE("%s: Got FW signal, retries = %x",
2697 __func__, 500-wait_limit);
2698 }
2699 hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2700 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2701}
2702
2703/**
2704 * hif_enable_bus(): enable bus
2705 *
2706 * This function enables the bus
2707 *
2708 * @ol_sc: soft_sc struct
2709 * @dev: device pointer
2710 * @bdev: bus dev pointer
2711 * @bid: bus id pointer
2712 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
2713 * Return: CDF_STATUS
2714 */
2715CDF_STATUS hif_enable_bus(struct ol_softc *ol_sc,
2716 struct device *dev, void *bdev,
2717 const hif_bus_id *bid,
2718 enum hif_enable_type type)
2719{
2720 int ret = 0;
2721 uint32_t hif_type, target_type;
2722 struct hif_pci_softc *sc;
2723 uint16_t revision_id;
2724 uint32_t lcr_val;
2725 int probe_again = 0;
2726 struct pci_dev *pdev = bdev;
2727 const struct pci_device_id *id = bid;
Komal Seelam91553ce2016-01-27 18:57:10 +05302728 struct hif_target_info *tgt_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002729
2730 HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
2731 __func__, cds_get_conparam(), id->device);
2732
2733 ol_sc = cds_get_context(CDF_MODULE_ID_HIF);
2734 if (!ol_sc) {
2735 HIF_ERROR("%s: hif_ctx is NULL", __func__);
2736 return CDF_STATUS_E_NOMEM;
2737 }
2738 sc = ol_sc->hif_sc;
2740
2741 sc->pdev = pdev;
2742 sc->dev = &pdev->dev;
2743 ol_sc->aps_osdev.bdev = pdev;
2744 ol_sc->aps_osdev.device = &pdev->dev;
2745 ol_sc->aps_osdev.bc.bc_handle = (void *)ol_sc->mem;
2746 ol_sc->aps_osdev.bc.bc_bustype = type;
2747 sc->devid = id->device;
2748 sc->cacheline_sz = dma_get_cache_alignment();
Komal Seelam91553ce2016-01-27 18:57:10 +05302749 tgt_info = hif_get_target_info_handle(ol_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002750again:
2751 ret = hif_enable_pci(sc, pdev, id);
2752 if (ret < 0) {
2753 HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
2754 __func__, ret);
2755 goto err_enable_pci;
2756 }
2757 HIF_TRACE("%s: hif_enable_pci done", __func__);
2758
2759 /* Temporary FIX: disable ASPM on peregrine.
2760 * Will be removed after the OTP is programmed
2761 */
2762 pci_read_config_dword(pdev, 0x80, &lcr_val);
2763 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2764
2765 device_disable_async_suspend(&pdev->dev);
2766 pci_read_config_word(pdev, 0x08, &revision_id);
2767
2768 ret = hif_get_device_type(id->device, revision_id,
2769 &hif_type, &target_type);
2770 if (ret < 0) {
2771 HIF_ERROR("%s: invalid device id/revision_id", __func__);
2772 goto err_tgtstate;
2773 }
2774 HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
2775 __func__, hif_type, target_type);
2776
2777 hif_register_tbl_attach(sc->ol_sc, hif_type);
2778 target_register_tbl_attach(sc->ol_sc, target_type);
2779
2780 ret = hif_pci_probe_tgt_wakeup(sc);
2781 if (ret < 0) {
2782 HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
2783 __func__, ret);
2784 if (ret == -EAGAIN)
2785 probe_again++;
2786 goto err_tgtstate;
2787 }
2788 HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
2789
Komal Seelam91553ce2016-01-27 18:57:10 +05302790 tgt_info->target_type = target_type;
2791
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002792 sc->soc_pcie_bar0 = pci_resource_start(pdev, BAR_NUM);
2793 if (!sc->soc_pcie_bar0) {
2794 HIF_ERROR("%s: ERROR - cannot get CE BAR0", __func__);
2795 ret = -EIO;
2796 goto err_tgtstate;
2797 }
2798 ol_sc->mem_pa = sc->soc_pcie_bar0;
2799
2800 BUG_ON(pci_get_drvdata(sc->pdev) != NULL);
2801 pci_set_drvdata(sc->pdev, sc);
2802
2803 ret = hif_init_cdf_ctx(ol_sc);
2804 if (ret != 0) {
2805 HIF_ERROR("%s: cannot init CDF", __func__);
2806 goto err_tgtstate;
2807 }
2808
2809 hif_target_sync(ol_sc);
2810 return 0;
2811
2812err_tgtstate:
2813 hif_deinit_cdf_ctx(ol_sc);
2814 hif_disable_pci(sc);
2815 sc->pci_enabled = false;
2816 HIF_ERROR("%s: error, hif_disable_pci done", __func__);
2817 return CDF_STATUS_E_ABORTED;
2818
2819err_enable_pci:
2820 if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
2821 int delay_time;
2822
2823 HIF_INFO("%s: pci reprobe", __func__);
2824 /* wait at least 100 ms before each reprobe */
2825 delay_time = max(100, 10 * (probe_again * probe_again));
2826 cdf_mdelay(delay_time);
2827 goto again;
2828 }
2829 return ret;
2830}
2831
2832/**
2833 * hif_get_target_type(): Get the target type
2834 *
2835 * This function is used to query the target type.
2836 *
2837 * @ol_sc: ol_softc struct pointer
2838 * @dev: device pointer
2839 * @bdev: bus dev pointer
2840 * @bid: bus id pointer
2841 * @hif_type: HIF type such as HIF_TYPE_QCA6180
2842 * @target_type: target type such as TARGET_TYPE_QCA6180
2843 *
2844 * Return: 0 for success
2845 */
2846int hif_get_target_type(struct ol_softc *ol_sc, struct device *dev,
2847 void *bdev, const hif_bus_id *bid, uint32_t *hif_type,
2848 uint32_t *target_type)
2849{
2850 uint16_t revision_id;
2851 struct pci_dev *pdev = bdev;
2852 const struct pci_device_id *id = bid;
2853
2854 pci_read_config_word(pdev, 0x08, &revision_id);
2855 return hif_get_device_type(id->device, revision_id,
2856 hif_type, target_type);
2857}
Houston Hoffman9078a152015-11-02 16:15:02 -08002858
2859#ifdef FEATURE_RUNTIME_PM
Houston Hoffmanf4607852015-12-17 17:14:40 -08002860
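/**
 * hif_pm_runtime_get_noresume() - take a runtime pm usage reference
 * @hif_ctx: HIF context
 *
 * Increments the runtime pm usage count without requesting a resume,
 * mirroring pm_runtime_get_noresume().
 */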
2861void hif_pm_runtime_get_noresume(void *hif_ctx)
2862{
2863 struct ol_softc *scn = hif_ctx;
2864 struct hif_pci_softc *sc;
2865
2866 if (NULL == scn)
2867 return;
2868
2869 sc = scn->hif_sc;
2870 if (NULL == sc)
2871 return;
2872
2873 sc->pm_stats.runtime_get++;
2874 pm_runtime_get_noresume(sc->dev);
2875}
2876
Houston Hoffman9078a152015-11-02 16:15:02 -08002877/**
2878 * hif_pm_runtime_get() - do a get operation on the device
2879 *
2880 * A get operation will prevent a runtime suspend until a
2881 * corresponding put is done. This api should be used when sending
2882 * data.
2883 *
2884 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
2885 * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
2886 *
2887 * Return: 0 if the bus is up and a get has been issued,
2888 * otherwise an error code.
2889 */
2890int hif_pm_runtime_get(void *hif_ctx)
2891{
2892 struct ol_softc *scn = hif_ctx;
2893 struct hif_pci_softc *sc;
2894 int ret;
2895 int pm_state;
2896
2897 if (NULL == scn) {
2898 HIF_ERROR("%s: Could not do runtime get, scn is null",
2899 __func__);
2900 return -EFAULT;
2901 }
2902 sc = scn->hif_sc;
2903
2904 pm_state = cdf_atomic_read(&sc->pm_state);
2905
2906 if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
2907 pm_state == HIF_PM_RUNTIME_STATE_NONE) {
2908 sc->pm_stats.runtime_get++;
2909 ret = __hif_pm_runtime_get(sc->dev);
2910
2911 /* Get can return 1 if the device is already active, just return
2912 * success in that case
2913 */
2914 if (ret > 0)
2915 ret = 0;
2916
2917 if (ret)
2918 hif_pm_runtime_put(hif_ctx);
2919
2920 if (ret && ret != -EINPROGRESS) {
2921 sc->pm_stats.runtime_get_err++;
2922 HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
2923 __func__, cdf_atomic_read(&sc->pm_state), ret);
2924 }
2925
2926 return ret;
2927 }
2928
2929 sc->pm_stats.request_resume++;
2930 sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
2931 ret = hif_pm_request_resume(sc->dev);
2932
2933 return -EAGAIN;
2934}
2935
2936/**
2937 * hif_pm_runtime_put() - do a put operation on the device
2938 *
2939 * A put operation will allow a runtime suspend after a corresponding
2940 * get was done. This api should be used when sending data.
2941 *
2942 * This api will return a failure if runtime pm is stopped.
2943 * This api will return failure if it would decrement the usage count below 0.
2944 *
2945 * Return: 0 if the put is performed
2946 */
2947int hif_pm_runtime_put(void *hif_ctx)
2948{
2949 struct ol_softc *scn = (struct ol_softc *)hif_ctx;
2950 struct hif_pci_softc *sc;
2951 int pm_state, usage_count;
2952 unsigned long flags;
2953 char *error = NULL;
2954
2955 if (NULL == scn) {
2956 HIF_ERROR("%s: Could not do runtime put, scn is null",
2957 __func__);
2958 return -EFAULT;
2959 }
2960 sc = scn->hif_sc;
2961
2962 usage_count = atomic_read(&sc->dev->power.usage_count);
2963
2964 if (usage_count == 1) {
2965 pm_state = cdf_atomic_read(&sc->pm_state);
2966
2967 if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
2968 error = "Ignoring unexpected put when runtime pm is disabled";
2969
2970 } else if (usage_count == 0) {
2971 error = "PUT Without a Get Operation";
2972 }
2973
2974 if (error) {
2975 spin_lock_irqsave(&sc->runtime_lock, flags);
2976 hif_pci_runtime_pm_warn(sc, error);
2977 spin_unlock_irqrestore(&sc->runtime_lock, flags);
2978 return -EINVAL;
2979 }
2980
2981 sc->pm_stats.runtime_put++;
2982
2983 hif_pm_runtime_mark_last_busy(sc->dev);
2984 hif_pm_runtime_put_auto(sc->dev);
2985
2986 return 0;
2987}
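
/*
 * Usage sketch for the get/put pair (send_data_pkt() is a hypothetical
 * helper): data is only sent while a get is outstanding.
 *
 * if (hif_pm_runtime_get(scn) == 0) {
 *	send_data_pkt(scn, pkt);
 *	hif_pm_runtime_put(scn);
 * } else {
 *	resume was requested; queue the packet and retry later
 * }
 */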
2988
2989
2990/**
2991 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol reason
2992 * @hif_sc: pci context
2993 * @lock: runtime_pm lock being acquired
2994 *
2995 * Return 0 if successful.
2996 */
2997static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
2998 *hif_sc, struct hif_pm_runtime_lock *lock)
2999{
3000 int ret = 0;
3001
3002 /*
3003 * We shouldn't set context->timeout to zero here when the context
3004 * is active, as the timeout API can be called back to back for the
3005 * same context.
3006 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
3007 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
3008 * API to ensure the timeout version is no more active and
3009 * list entry of this context will be deleted during allow suspend.
3010 */
3011 if (lock->active)
3012 return 0;
3013
3014 ret = __hif_pm_runtime_get(hif_sc->dev);
3015
3016 /*
3017 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
3018 * RPM_SUSPENDING. Any other negative value is an error.
3019 * We shouldn't do a runtime_put here: allow suspend is called later
3020 * with this context and decrements the usage count there, so suspend
3021 * stays prevented until then.
3022 */
3023
3024 if (ret < 0 && ret != -EINPROGRESS) {
3025 hif_sc->pm_stats.runtime_get_err++;
3026 hif_pci_runtime_pm_warn(hif_sc,
3027 "Prevent Suspend Runtime PM Error");
3028 }
3029
3030 hif_sc->prevent_suspend_cnt++;
3031
3032 lock->active = true;
3033
3034 list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
3035
3036 hif_sc->pm_stats.prevent_suspend++;
3037
3038 HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
3039 cdf_atomic_read(&hif_sc->pm_state), ret);
3040
3041 return ret;
3042}
3043
3044static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
3045 struct hif_pm_runtime_lock *lock)
3046{
3047 int ret = 0;
3048 int usage_count;
3049
3050 if (hif_sc->prevent_suspend_cnt == 0)
3051 return ret;
3052
3053 if (!lock->active)
3054 return ret;
3055
3056 usage_count = atomic_read(&hif_sc->dev->power.usage_count);
3057
3058 /*
3059 * During Driver unload, platform driver increments the usage
3060 * count to prevent any runtime suspend getting called.
3061 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
3062 * usage_count should be one. Ideally this shouldn't happen as
3063 * context->active should be active for allow suspend to happen
3064 * Handling this case here to prevent any failures.
3065 */
3066 if ((cdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
3067 && usage_count == 1) || usage_count == 0) {
3068 hif_pci_runtime_pm_warn(hif_sc,
3069 "Allow without a prevent suspend");
3070 return -EINVAL;
3071 }
3072
3073 list_del(&lock->list);
3074
3075 hif_sc->prevent_suspend_cnt--;
3076
3077 lock->active = false;
3078 lock->timeout = 0;
3079
3080 hif_pm_runtime_mark_last_busy(hif_sc->dev);
3081 ret = hif_pm_runtime_put_auto(hif_sc->dev);
3082
3083 HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
3084 cdf_atomic_read(&hif_sc->pm_state), ret);
3085
3086 hif_sc->pm_stats.allow_suspend++;
3087 return ret;
3088}
3089
3090/**
3091 * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
3092 * @data: callback data, the pci context
3093 *
3094 * If runtime locks are acquired with a timeout, this function releases
3095 * the expired locks when the timer fires.
3098 */
3099static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
3100{
3101 struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
3102 unsigned long flags;
3103 unsigned long timer_expires;
3104 struct hif_pm_runtime_lock *context, *temp;
3105
3106 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3107
3108 timer_expires = hif_sc->runtime_timer_expires;
3109
3110 /* Make sure we are not called too early, this should take care of
3111 * following case
3112 *
3113 * CPU0 CPU1 (timeout function)
3114 * ---- ----------------------
3115 * spin_lock_irq
3116 * timeout function called
3117 *
3118 * mod_timer()
3119 *
3120 * spin_unlock_irq
3121 * spin_lock_irq
3122 */
3123 if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
3124 hif_sc->runtime_timer_expires = 0;
3125 list_for_each_entry_safe(context, temp,
3126 &hif_sc->prevent_suspend_list, list) {
3127 if (context->timeout) {
3128 __hif_pm_runtime_allow_suspend(hif_sc, context);
3129 hif_sc->pm_stats.allow_suspend_timeout++;
3130 }
3131 }
3132 }
3133
3134 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3135}
3136
3137int hif_pm_runtime_prevent_suspend(void *ol_sc,
3138 struct hif_pm_runtime_lock *data)
3139{
3140 struct ol_softc *sc = (struct ol_softc *)ol_sc;
3141 struct hif_pci_softc *hif_sc = sc->hif_sc;
3142 struct hif_pm_runtime_lock *context = data;
3143 unsigned long flags;
3144
3145 if (!sc->enable_runtime_pm)
3146 return 0;
3147
3148 if (!context)
3149 return -EINVAL;
3150
3151 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3152 context->timeout = 0;
3153 __hif_pm_runtime_prevent_suspend(hif_sc, context);
3154 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3155
3156 return 0;
3157}
3158
3159int hif_pm_runtime_allow_suspend(void *ol_sc, struct hif_pm_runtime_lock *data)
3160{
3161 struct ol_softc *sc = (struct ol_softc *)ol_sc;
3162 struct hif_pci_softc *hif_sc = sc->hif_sc;
3163 struct hif_pm_runtime_lock *context = data;
3164
3165 unsigned long flags;
3166
3167 if (!sc->enable_runtime_pm)
3168 return 0;
3169
3170 if (!context)
3171 return -EINVAL;
3172
3173 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3174
3175 __hif_pm_runtime_allow_suspend(hif_sc, context);
3176
3177 /* The list can be empty as well in cases where
3178 * we have one context in the list and the allow
3179 * suspend came before the timer expires and we delete
3180 * context above from the list.
3181 * When list is empty prevent_suspend count will be zero.
3182 */
3183 if (hif_sc->prevent_suspend_cnt == 0 &&
3184 hif_sc->runtime_timer_expires > 0) {
3185 del_timer(&hif_sc->runtime_timer);
3186 hif_sc->runtime_timer_expires = 0;
3187 }
3188
3189 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3190
3191 return 0;
3192}
3193
3194/**
3195 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
3196 * @ol_sc: HIF context
3197 * @lock: which lock is being acquired
3198 * @delay: Timeout in milliseconds
3199 *
3200 * Prevent runtime suspend with a timeout after which runtime suspend would be
3201 * allowed. This API uses a single timer to allow the suspend and timer is
3202 * modified if the timeout is changed before timer fires.
3203 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
3204 * of starting the timer.
3205 *
3206 * It is wise to try not to use this API and correct the design if possible.
3207 *
3208 * Return: 0 on success and negative error code on failure
3209 */
3210int hif_pm_runtime_prevent_suspend_timeout(void *ol_sc,
3211 struct hif_pm_runtime_lock *lock, unsigned int delay)
3212{
3213 struct ol_softc *sc = (struct ol_softc *)ol_sc;
3214 struct hif_pci_softc *hif_sc = sc->hif_sc;
3215 int ret = 0;
3216 unsigned long expires;
3217 unsigned long flags;
3218 struct hif_pm_runtime_lock *context = lock;
3219
3220 if (cds_is_load_unload_in_progress()) {
3221 HIF_ERROR("%s: Load/unload in progress, ignore!",
3222 __func__);
3223 return -EINVAL;
3224 }
3225
3226 if (cds_is_logp_in_progress()) {
3227 HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
3228 return -EINVAL;
3229 }
3230
3231 if (!sc->enable_runtime_pm)
3232 return 0;
3233
3234 if (!context)
3235 return -EINVAL;
3236
3237 /*
3238 * Don't use internal timer if the timeout is less than auto suspend
3239 * delay.
3240 */
3241 if (delay <= hif_sc->dev->power.autosuspend_delay) {
3242 hif_pm_request_resume(hif_sc->dev);
3243 hif_pm_runtime_mark_last_busy(hif_sc->dev);
3244 return ret;
3245 }
3246
3247 expires = jiffies + msecs_to_jiffies(delay);
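	/* 0 is used below as the "no timer pending" sentinel; skip over it */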
3248 expires += !expires;
3249
3250 spin_lock_irqsave(&hif_sc->runtime_lock, flags);
3251
3252 context->timeout = delay;
3253 ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
3254 hif_sc->pm_stats.prevent_suspend_timeout++;
3255
3256 /* Modify the timer only if new timeout is after already configured
3257 * timeout
3258 */
3259 if (time_after(expires, hif_sc->runtime_timer_expires)) {
3260 mod_timer(&hif_sc->runtime_timer, expires);
3261 hif_sc->runtime_timer_expires = expires;
3262 }
3263
3264 spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
3265
3266 HIF_ERROR("%s: pm_state: %d delay: %dms ret: %d\n", __func__,
3267 cdf_atomic_read(&hif_sc->pm_state), delay, ret);
3268
3269 return ret;
3270}
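
/*
 * Usage sketch (hypothetical caller): keep the bus up for 500 ms around
 * an operation whose completion arrives asynchronously; the timer or an
 * explicit allow call releases the lock, whichever comes first.
 *
 * hif_pm_runtime_prevent_suspend_timeout(scn, lock, 500);
 * start_async_op(scn);
 * ...
 * hif_pm_runtime_allow_suspend(scn, lock);
 */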
3271
3272/**
3273 * hif_runtime_lock_init() - API to initialize Runtime PM context
3274 * @name: Context name
3275 *
3276 * This API initalizes the Runtime PM context of the caller and
3277 * return the pointer.
3278 *
3279 * Return: void *
3280 */
3281struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
3282{
3283 struct hif_pm_runtime_lock *context;
3284
3285 context = cdf_mem_malloc(sizeof(*context));
3286 if (!context) {
3287 HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
3288 __func__);
3289 return NULL;
3290 }
3291
3292 context->name = name ? name : "Default";
3293 return context;
3294}
3295
3296/**
3297 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
3298 * @data: Runtime PM context
3299 *
3300 * Return: void
3301 */
3302void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
3303{
3304 unsigned long flags;
3305 struct hif_pm_runtime_lock *context = data;
3306 struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
3307 struct hif_pci_softc *sc;
3308
3309 if (!scn)
3310 return;
3311
3312 sc = scn->hif_sc;
3313
3314 if (!sc)
3315 return;
3316
3317 if (!context)
3318 return;
3319
3320 /*
3321 * If the context is still active, delete its list entry and drop
3322 * the usage count before freeing it.
3323 */
3324 spin_lock_irqsave(&sc->runtime_lock, flags);
3325 __hif_pm_runtime_allow_suspend(sc, context);
3326 spin_unlock_irqrestore(&sc->runtime_lock, flags);
3327
3328 cdf_mem_free(context);
3329}
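
/*
 * Lifecycle sketch (do_protected_work() is a hypothetical helper):
 *
 * struct hif_pm_runtime_lock *lock = hif_runtime_lock_init("my_feature");
 *
 * if (lock) {
 *	hif_pm_runtime_prevent_suspend(scn, lock);
 *	do_protected_work(scn);
 *	hif_pm_runtime_allow_suspend(scn, lock);
 *	hif_runtime_lock_deinit(lock);
 * }
 */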
3330
3331#endif /* FEATURE_RUNTIME_PM */
Komal Seelamb3a3bdf2016-02-01 19:22:17 +05303332
3333/**
3334 * hif_get_bmi_ctx() - API to get BMI context
3335 * @hif_ctx: HIF Context
3336 *
3337 * Return: Pointer to BMI Context
3338 */
3339struct bmi_info *hif_get_bmi_ctx(void *hif_ctx)
3340{
3341 struct ol_softc *sc = hif_ctx;
3342 return &sc->bmi_ctx;
3343}