/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);

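/*
 * Host-side Copy Engine (CE) attributes. src_nentries/dest_nentries size
 * the send and receive rings (a direction is unused when its count is 0),
 * src_sz_max bounds the per-buffer transfer size, and CE_ATTR_DIS_INTR
 * suppresses completion interrupts so the pipe can be polled instead
 * (used for the host->target HTT pipe, CE4).
 */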
static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise the interrupt cannot
         * really be cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

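/*
 * The "early" irq is installed before the full interrupt setup so that
 * firmware indications/errors are not missed while the device is still
 * being brought up. With shared legacy interrupts the cause register must
 * be checked and the interrupt disabled and cleared here; with MSI the
 * vector is exclusive, so no such check is needed.
 */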
static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->early_irq_tasklet);

        return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
         * first interrupt from the irq vector is triggered in all cases for
         * FW indication/errors */
        ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
                          IRQF_SHARED, "ath10k_pci (early)", ar);
        if (ret) {
                ath10k_warn("failed to request early irq: %d\n", ret);
                return ret;
        }

        return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
        free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
311
Kalle Valo5e3dd152013-06-12 20:52:10 +0300312/*
313 * Diagnostic read/write access is provided for startup/config/debug usage.
314 * Caller must guarantee proper alignment, when applicable, and single user
315 * at any moment.
316 */
317static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
318 int nbytes)
319{
320 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
321 int ret = 0;
322 u32 buf;
323 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
324 unsigned int id;
325 unsigned int flags;
Michal Kazior2aa39112013-08-27 13:08:02 +0200326 struct ath10k_ce_pipe *ce_diag;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300327 /* Host buffer address in CE space */
328 u32 ce_data;
329 dma_addr_t ce_data_base = 0;
330 void *data_buf = NULL;
331 int i;
332
333 /*
334 * This code cannot handle reads to non-memory space. Redirect to the
335 * register read fn but preserve the multi word read capability of
336 * this fn
337 */
338 if (address < DRAM_BASE_ADDRESS) {
339 if (!IS_ALIGNED(address, 4) ||
340 !IS_ALIGNED((unsigned long)data, 4))
341 return -EIO;
342
343 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
344 ar, address, (u32 *)data)) == 0)) {
345 nbytes -= sizeof(u32);
346 address += sizeof(u32);
347 data += sizeof(u32);
348 }
349 return ret;
350 }
351
352 ce_diag = ar_pci->ce_diag;
353
354 /*
355 * Allocate a temporary bounce buffer to hold caller's data
356 * to be DMA'ed from Target. This guarantees
357 * 1) 4-byte alignment
358 * 2) Buffer in DMA-able space
359 */
360 orig_nbytes = nbytes;
361 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
362 orig_nbytes,
363 &ce_data_base);
364
365 if (!data_buf) {
366 ret = -ENOMEM;
367 goto done;
368 }
369 memset(data_buf, 0, orig_nbytes);
370
371 remaining_bytes = orig_nbytes;
372 ce_data = ce_data_base;
373 while (remaining_bytes) {
374 nbytes = min_t(unsigned int, remaining_bytes,
375 DIAG_TRANSFER_LIMIT);
376
377 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
378 if (ret != 0)
379 goto done;
380
381 /* Request CE to send from Target(!) address to Host buffer */
382 /*
383 * The address supplied by the caller is in the
384 * Target CPU virtual address space.
385 *
386 * In order to use this address with the diagnostic CE,
387 * convert it from Target CPU virtual address space
388 * to CE address space
389 */
390 ath10k_pci_wake(ar);
391 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
392 address);
393 ath10k_pci_sleep(ar);
394
395 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
396 0);
397 if (ret)
398 goto done;
399
400 i = 0;
401 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
402 &completed_nbytes,
403 &id) != 0) {
404 mdelay(1);
405 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
406 ret = -EBUSY;
407 goto done;
408 }
409 }
410
411 if (nbytes != completed_nbytes) {
412 ret = -EIO;
413 goto done;
414 }
415
416 if (buf != (u32) address) {
417 ret = -EIO;
418 goto done;
419 }
420
421 i = 0;
422 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
423 &completed_nbytes,
424 &id, &flags) != 0) {
425 mdelay(1);
426
427 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
428 ret = -EBUSY;
429 goto done;
430 }
431 }
432
433 if (nbytes != completed_nbytes) {
434 ret = -EIO;
435 goto done;
436 }
437
438 if (buf != ce_data) {
439 ret = -EIO;
440 goto done;
441 }
442
443 remaining_bytes -= nbytes;
444 address += nbytes;
445 ce_data += nbytes;
446 }
447
448done:
449 if (ret == 0) {
450 /* Copy data from allocated DMA buf to caller's buf */
451 WARN_ON_ONCE(orig_nbytes & 3);
452 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
453 ((u32 *)data)[i] =
454 __le32_to_cpu(((__le32 *)data_buf)[i]);
455 }
456 } else
457 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
458 __func__, address);
459
460 if (data_buf)
461 pci_free_consistent(ar_pci->pdev, orig_nbytes,
462 data_buf, ce_data_base);
463
464 return ret;
465}
466
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

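/*
 * Target power management: ath10k_do_pci_wake()/ath10k_do_pci_sleep()
 * reference count wakeup requests in keep_awake_count. The first waker
 * forces the SoC AWAKE and polls RTC_STATE (with a backoff capped at
 * 50 us per iteration) until the target confirms it is up; the last
 * sleeper lets the target doze again, so calls must be balanced.
 */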
int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (transfer_context == NULL)
                        continue;

                cb->tx_completion(ar, transfer_context, transfer_id);
        }
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;
        int err;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                err = ath10k_pci_post_rx_pipe(pipe_info, 1);
                if (unlikely(err)) {
                        /* FIXME: retry */
                        ath10k_warn("failed to replenish CE rx ring %d: %d\n",
                                    pipe_info->pipe_num, err);
                }

                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                cb->rx_completion(ar, skb, pipe_info->pipe_num);
        }
}

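/*
 * Send a scatter-gather list down a CE pipe. Every fragment but the last
 * is submitted with CE_SEND_FLAG_GATHER so the CE treats the items as one
 * logical transfer; the final fragment is sent without the flag, closing
 * the gather sequence. Ring space for all n_items is checked up front so
 * a partial gather is never left in the ring.
 */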
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        int err, i;

        spin_lock_bh(&ar_pci->ce_lock);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto unlock;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto unlock;
        }

        /* `i` is equal to `n_items - 1` after for() */

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto unlock;

        err = 0;
unlock:
        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

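/*
 * Firmware crash handling: read the failure-state pointer from the host
 * interest area, dump the target's register snapshot to the log and
 * schedule the restart worker to recover the device.
 */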
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just
                 * wait, since checking involves reading a CE register, which
                 * is a relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);
        tasklet_kill(&ar_pci->early_irq_tasklet);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services. So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

        /* pipe 5 unused   */
        /* pipe 6 reserved */
        /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request irqs: %d\n", ret);
                goto err_early_irq;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
         * driver lifecycle. */
        ret_early = ath10k_pci_request_early_irq(ar);
        if (ret_early)
                ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

        return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!netbuf)
                        continue;

                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_early_irq(ar);
        if (ret)
                ath10k_warn("failed to re-enable early irq: %d\n", ret);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_warm_reset(ar);

        ar_pci->started = 0;
}

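/*
 * BMI (Bootloader Messaging Interface) exchange: the request is copied
 * into a DMA-safe bounce buffer and sent through the BMI tx CE; if a
 * response is expected a receive buffer is posted first. Completion is
 * polled synchronously via ath10k_pci_bmi_wait() below rather than
 * signalled by interrupt.
 */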
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}

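/*
 * Poll both BMI pipes, yielding the CPU between polls, until the
 * transfer completes or BMI_COMMUNICATION_TIMEOUT_HZ jiffies elapse.
 */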
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (completion_done(&xfer->done))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_CONTROL,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                3,
        },
        {
                ATH10K_HTC_SVC_ID_WMI_CONTROL,
                PIPEDIR_IN,             /* in = DL = target -> host */
                2,
        },
        {
                ATH10K_HTC_SVC_ID_RSVD_CTRL,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                0,              /* could be moved to 3 (share with WMI) */
        },
        {
                ATH10K_HTC_SVC_ID_RSVD_CTRL,
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },
        {
                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,     /* not currently used */
                PIPEDIR_OUT,            /* out = UL = host -> target */
                0,
        },
        {
                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,     /* not currently used */
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },
        {
                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                PIPEDIR_OUT,            /* out = UL = host -> target */
                4,
        },
        {
                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                PIPEDIR_IN,             /* in = DL = target -> host */
                1,
        },

        /* (Additions here) */

        {                               /* Must be last */
                0,
                0,
                0,
        },
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        int ret;
        u32 core_ctrl;

        ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
                                          CORE_CTRL_ADDRESS,
                                          &core_ctrl);
        if (ret) {
                ath10k_warn("failed to read core_ctrl: %d\n", ret);
                return ret;
        }

        /* A_INUM_FIRMWARE interrupt to Target CPU */
        core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                           CORE_CTRL_ADDRESS,
                                           core_ctrl);
        if (ret) {
                ath10k_warn("failed to set target CPU interrupt mask: %d\n",
                            ret);
                return ret;
        }

        return 0;
}

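/*
 * Download the host's CE pipe and service-to-pipe configuration to the
 * target via the diagnostic window, disable PCIe L1 in the target's
 * config flags, set up early memory allocation (first bank switched to
 * IRAM) and finally set HI_OPTION_EARLY_CFG_DONE so the target proceeds
 * with initialisation.
 */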
static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
                                          &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr),
                                          &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
                                        sizeof(target_ce_config_wlan));

        if (ret != 0) {
                ath10k_err("Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map),
                                          &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err("Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err("Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                        target_service_to_ce_map_wlan,
                                        sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err("Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags),
                                          &pcie_config_flags);
        if (ret != 0) {
                ath10k_err("Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
                                        offsetof(struct pcie_state, config_flags),
                                        &pcie_config_flags,
                                        sizeof(pcie_config_flags));
        if (ret != 0) {
                ath10k_err("Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to get early alloc val: %d\n", ret);
                return ret;
        }

        /* first bank is switched to IRAM */
        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
                         HI_EARLY_ALLOC_MAGIC_MASK);
        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);

        ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to set early alloc val: %d\n", ret);
                return ret;
        }

        /* Tell Target to proceed with initialization */
        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

        ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to get option val: %d\n", ret);
                return ret;
        }

        flag2_value |= HI_OPTION_EARLY_CFG_DONE;

        ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to set option val: %d\n", ret);
                return ret;
        }

        return 0;
}

static int ath10k_pci_ce_init(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];

                pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
                if (pipe_info->ce_hdl == NULL) {
                        ath10k_err("failed to initialize CE for pipe: %d\n",
                                   pipe_num);

                        /* It is safe to call it here. It checks if ce_hdl is
                         * valid for each pipe */
                        ath10k_pci_ce_deinit(ar);
                        return -1;
                }

                if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the ultimate CE for
                         * diagnostic Window support
                         */
                        ar_pci->ce_diag = pipe_info->ce_hdl;
                        continue;
                }

                pipe_info->buf_sz = (size_t) (attr->src_sz_max);
        }

        return 0;
}

static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}

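/*
 * Warm reset sequence: mask and clear pending PCIe interrupts, clear
 * the firmware indicator and the SoC LF timer, pulse the Copy Engine
 * reset bit, then warm-reset the target CPU. The interleaved register
 * reads and debug prints help diagnose a target stuck in reset.
 */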
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	/* reset CE */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* unreset CE */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

	msleep(100);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

	ath10k_do_pci_sleep(ar);
	return ret;
}

static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	int ret;

	/*
	 * Hardware CUS232 version 2 has some issues with cold reset and the
	 * preferred (and safer) way to perform a device reset is through a
	 * warm reset.
	 *
	 * Warm reset doesn't always work though (notably after a firmware
	 * crash) so fall back to cold reset if necessary.
	 */
	ret = __ath10k_pci_hif_power_up(ar, false);
	if (ret) {
		ath10k_warn("failed to power up target using warm reset: %d\n",
			    ret);

		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
			return ret;

		ath10k_warn("trying cold reset\n");

		ret = __ath10k_pci_hif_power_up(ar, true);
		if (ret) {
			ath10k_err("failed to power up target using cold reset too (%d)\n",
				   ret);
			return ret;
		}
	}

	return 0;
}
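
/*
 * The warm-then-cold fallback above can be restricted via the
 * reset_mode module parameter, e.g. (hypothetical invocation):
 *
 *	modprobe ath10k_pci reset_mode=1	# warm reset only
 */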

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_warm_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44
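
/*
 * 0x44 is where this device exposes the PCI power management
 * control/status register (PMCSR) in config space; its two low bits
 * select the power state (0 = D0, 3 = D3hot), which is what the
 * suspend/resume handlers below manipulate.
 */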

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CEs 0..5.
	 * CEs 6 and 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

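/*
 * Early-boot interrupt service: until the regular handlers are
 * registered only the firmware crash indicator is checked, since the
 * state needed for a full crash dump does not exist yet.
 */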
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

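/*
 * With a full block of MSI vectors the interrupts are split by purpose:
 * vector MSI_ASSIGN_FW carries firmware/error events while vectors
 * MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX map one-to-one onto the
 * interrupting Copy Engines.
 */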
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR, MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}

static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}

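/*
 * Interrupt setup tries MSI-X first, then single MSI, then falls back
 * to a legacy shared interrupt. The irq_mode module parameter can pin
 * a mode, e.g. (hypothetical invocation):
 *
 *	modprobe ath10k_pci irq_mode=1	# force legacy interrupts
 */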
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}

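/*
 * Poll the firmware indicator register for FW_IND_INITIALIZED for up to
 * ATH10K_PCI_TARGET_WAIT ms. An all-ones value is skipped rather than
 * interpreted: reads from a dead or still-resetting PCIe device return
 * 0xffffffff.
 */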
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	int ret;
	u32 val;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target for init: %d\n", ret);
		return ret;
	}

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
					       PCIE_INTR_FIRMWARE_MASK |
					       PCIE_INTR_CE_MASK_ALL);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
		ath10k_err("failed to receive initialized event from target: %08x\n",
			   val);
		ret = -ETIMEDOUT;
		goto out;
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}

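/*
 * Cold reset: assert the global SoC reset (including PCIe) via
 * SOC_GLOBAL_RESET_ADDRESS, wait for RTC_STATE to report the cold
 * reset state, then deassert and wait for the state to clear.
 */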
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}

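/*
 * Probe flow: allocate the per-device state, refresh and enable the
 * PCI device (BAR, bus mastering, 32-bit DMA masks), disable ASPM,
 * iomap the register space, read the chip id and hand the device over
 * to the core.
 */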
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_pci_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);