/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
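
/*
 * Example (hypothetical invocation): forcing MSI interrupts at load time:
 *
 *   modprobe ath10k_pci irq_mode=2
 *
 * The 0644 permissions also expose both parameters under
 * /sys/module/ath10k_pci/parameters/, though irq_mode is only consulted
 * when the interrupt setup is (re)initialized.
 */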

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
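
/*
 * Host-side Copy Engine attributes. src_nentries/dest_nentries give the
 * host ring depth for each direction of a pipe (0 means that direction is
 * unused), while src_sz_max bounds the size of a single transfer.
 */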
static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
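
/*
 * Note: target_ce_config_wlan (and the service-to-pipe map further below)
 * is not just host bookkeeping; it is written verbatim into target memory
 * by ath10k_pci_init_config() so the firmware uses a matching CE setup.
 */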

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise interrupt can not be
         * really cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->early_irq_tasklet);

        return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
         * interrupt from irq vector is triggered in all cases for FW
         * indication/errors */
        ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
                          IRQF_SHARED, "ath10k_pci (early)", ar);
        if (ret) {
                ath10k_warn("failed to request early irq: %d\n", ret);
                return ret;
        }

        return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
        free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
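
/*
 * The early irq stays installed whenever the full per-CE interrupt setup is
 * torn down (see ath10k_pci_hif_start()/ath10k_pci_hif_stop() below), so a
 * firmware error indication is never silently dropped in between.
 */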

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         * 1) 4-byte alignment
         * 2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data; /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         * 1) 4-byte alignment
         * 2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
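
/*
 * Wake/sleep requests are refcounted through keep_awake_count: the SoC is
 * forced awake on the 0 -> 1 transition in ath10k_do_pci_wake() and is
 * allowed to sleep again only once the count drops back to zero here.
 */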

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (transfer_context == NULL)
                        continue;

                cb->tx_completion(ar, transfer_context, transfer_id);
        }
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;
        int err;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                err = ath10k_pci_post_rx_pipe(pipe_info, 1);
                if (unlikely(err)) {
                        /* FIXME: retry */
                        ath10k_warn("failed to replenish CE rx ring %d: %d\n",
                                    pipe_info->pipe_num, err);
                }

                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                cb->rx_completion(ar, skb, pipe_info->pipe_num);
        }
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        int err, i;

        spin_lock_bh(&ar_pci->ce_lock);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto unlock;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto unlock;
        }

        /* `i` is equal to `n_items - 1` after for() */

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto unlock;

        err = 0;
unlock:
        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}
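
/*
 * All but the last scatter-gather item above are posted with
 * CE_SEND_FLAG_GATHER; the final, flag-less send terminates the gather
 * sequence so the target sees the whole chain as a single transfer.
 */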

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just
                 * wait, since checking involves reading a CE register, which
                 * is a relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);
        tasklet_kill(&ar_pci->early_irq_tasklet);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services. So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}
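
/*
 * Note: only dest_nentries - 1 buffers are posted per pipe; one ring slot
 * is presumably left empty so that a completely full ring can be told
 * apart from an empty one (cf. the sw_index - 1 arithmetic in the tx path).
 */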

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request irq: %d\n", ret);
                goto err_early_irq;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
         * driver lifecycle. */
        ret_early = ath10k_pci_request_early_irq(ar);
        if (ret_early)
                ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

        return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!netbuf)
                        continue;

                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_early_irq(ar);
        if (ret)
                ath10k_warn("failed to re-enable early irq: %d\n", ret);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_warm_reset(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* the transfer completed in time */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (completion_done(&xfer->done))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}
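
/*
 * BMI runs during early boot, before the CE completion callbacks are put to
 * use, so ath10k_pci_bmi_wait() simply polls both BMI pipes for completions
 * (yielding via schedule() in between) until the transfer finishes or
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses.
 */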

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_CONTROL,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_CONTROL,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_RSVD_CTRL,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                0,                      /* could be moved to 3 (share with WMI) */
        },
        {
                ATH10K_HTC_SVC_ID_RSVD_CTRL,
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },
        {
                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
                PIPEDIR_OUT,            /* out = UL = host -> target */
                0,
        },
        {
                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },
        {
                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                4,
        },
        {
                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },

        /* (Additions here) */

        {                               /* Must be last */
                0,
                0,
                0,
        },
};
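
/*
 * Like target_ce_config_wlan, this map is downloaded into target memory by
 * ath10k_pci_init_config(); the all-zero "Must be last" entry evidently
 * serves as the table's end marker, which is why it has to stay last.
 */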

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        int ret;
        u32 core_ctrl;

        ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
                                          CORE_CTRL_ADDRESS,
                                          &core_ctrl);
        if (ret) {
                ath10k_warn("failed to read core_ctrl: %d\n", ret);
                return ret;
        }

        /* A_INUM_FIRMWARE interrupt to Target CPU */
        core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                           CORE_CTRL_ADDRESS,
                                           core_ctrl);
        if (ret) {
                ath10k_warn("failed to set target CPU interrupt mask: %d\n",
                            ret);
                return ret;
        }

        return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
                                          &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr),
                                          &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
                                        sizeof(target_ce_config_wlan));

        if (ret != 0) {
                ath10k_err("Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map),
                                          &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err("Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err("Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                        target_service_to_ce_map_wlan,
                                        sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err("Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags),
                                          &pcie_config_flags);
        if (ret != 0) {
                ath10k_err("Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
                                        offsetof(struct pcie_state, config_flags),
                                        &pcie_config_flags,
                                        sizeof(pcie_config_flags));
        if (ret != 0) {
                ath10k_err("Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to get early alloc val: %d\n", ret);
                return ret;
        }

        /* first bank is switched to IRAM */
        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
                         HI_EARLY_ALLOC_MAGIC_MASK);
        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);

        ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to set early alloc val: %d\n", ret);
                return ret;
        }

        /* Tell Target to proceed with initialization */
        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

        ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to get option val: %d\n", ret);
                return ret;
        }

        flag2_value |= HI_OPTION_EARLY_CFG_DONE;

        ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to set option val: %d\n", ret);
                return ret;
        }

        return 0;
}

static int ath10k_pci_ce_init(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];

                pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
                if (pipe_info->ce_hdl == NULL) {
                        ath10k_err("failed to initialize CE for pipe: %d\n",
                                   pipe_num);

                        /* It is safe to call it here. It checks if ce_hdl is
                         * valid for each pipe */
                        ath10k_pci_ce_deinit(ar);
                        return -1;
                }

                if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the ultimate CE for
                         * diagnostic Window support
                         */
                        ar_pci->ce_diag = pipe_info->ce_hdl;
                        continue;
                }

                pipe_info->buf_sz = (size_t) (attr->src_sz_max);
        }

        return 0;
}

static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 fw_indicator_address, fw_indicator;

        ath10k_pci_wake(ar);

        fw_indicator_address = ar_pci->fw_indicator_address;
        fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

        if (fw_indicator & FW_IND_EVENT_PENDING) {
                /* ACK: clear Target-side pending event */
                ath10k_pci_write32(ar, fw_indicator_address,
                                   fw_indicator & ~FW_IND_EVENT_PENDING);

                if (ar_pci->started) {
                        ath10k_pci_hif_dump_area(ar);
                } else {
                        /*
                         * Probable Target failure before we're prepared
                         * to handle it. Generally unexpected.
                         */
                        ath10k_warn("early firmware event indicated\n");
                }
        }

        ath10k_pci_sleep(ar);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 val;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");

        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target: %d\n", ret);
                return ret;
        }

        /* debug */
        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_CAUSE_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                CPU_INTR_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
                   val);

        /* disable pending irqs */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS, 0);

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_CLR_ADDRESS, ~0);

        msleep(100);

        /* clear fw indicator */
        ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);

        /* clear target LF timer interrupts */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_LF_TIMER_CONTROL0_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
                           SOC_LF_TIMER_CONTROL0_ADDRESS,
                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

        /* reset CE */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val | SOC_RESET_CONTROL_CE_RST_MASK);
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        msleep(10);

        /* unreset CE */
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        msleep(10);

        /* debug */
        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_CAUSE_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                CPU_INTR_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
                   val);

        /* CPU warm reset */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

        msleep(100);

        ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

        ath10k_do_pci_sleep(ar);
        return ret;
}
1856
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	int ret;

	/*
	 * Hardware CUS232 version 2 has some issues with cold reset and the
	 * preferred (and safer) way to perform a device reset is through a
	 * warm reset.
	 *
	 * Warm reset doesn't always work though (notably after a firmware
	 * crash) so fall back to cold reset if necessary.
	 */
	ret = __ath10k_pci_hif_power_up(ar, false);
	if (ret) {
		ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
			    ret);

		ret = __ath10k_pci_hif_power_up(ar, true);
		if (ret) {
			ath10k_err("failed to power up target using cold reset too (%d)\n",
				   ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_warm_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

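	/* ATH10K_PCI_PM_CONTROL (0x44) is assumed to be the PCI power
	 * management control/status register (PMCSR) on this device;
	 * bits [1:0] select the power state and 0x3 means D3hot. Only
	 * transition if the device is not already in D3hot. */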
	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

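	/* A non-zero power state means the device was left in D3hot by
	 * suspend; restore the saved config space and put it back into
	 * D0 before touching any other registers. */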
	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif

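/* Glue between the PCI transport and the transport-agnostic HIF layer
 * used by the ath10k core. */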
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CEs 0..5.
	 * CEs 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

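/* Early-boot counterpart of ath10k_pci_fw_interrupt_handler: used
 * before CE/HTC are up, when a firmware crash can only be reported,
 * not dumped. */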
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

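/* num_msi_intrs encodes the negotiated interrupt mode: 0 means legacy
 * (shared) interrupts, 1 means a single MSI, and MSI_NUM_REQUEST means
 * a block of per-CE vectors handled by the MSI-X request path. */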
static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR, MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}

static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}

static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}

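/* Poll the firmware indicator register for FW_IND_INITIALIZED for up
 * to 3 seconds. In legacy interrupt mode the interrupt-enable write is
 * repeated on every iteration to paper over the race described in
 * ath10k_pci_init_irq(). */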
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("target stalled\n");
		ret = -EIO;
		goto out;
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}

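/* Cold reset: toggle bit 0 of SOC_GLOBAL_RESET and wait for RTC_STATE
 * to confirm that the target entered and then left the cold reset
 * state. This resets the whole target, including the PCIe core. */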
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

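	/* The chip id lives in target SoC register space, so the target
	 * must be woken up before it can be read. */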
	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);