/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
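/* Usage note (illustrative): both knobs above are ordinary module
 * parameters, so an interrupt mode can be forced at load time, e.g.
 * "modprobe ath10k_pci irq_mode=1" to request legacy interrupts. */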

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};
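/* In each ce_attr above, src_nentries sizes the host->target send ring and
 * dest_nentries sizes the target->host receive ring (zero means the pipe
 * is unused in that direction); src_sz_max bounds a single transfer. */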

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};
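/* Note: pipedir here matches the service map further below:
 * PIPEDIR_OUT = UL = host->target, PIPEDIR_IN = DL = target->host. */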

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
	 * interrupt from irq vector is triggered in all cases for FW
	 * indication/errors */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
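/* Illustrative use of the diagnostic accessors (this exact pattern appears
 * in ath10k_pci_wake_target_cpu() below):
 *
 *	u32 core_ctrl;
 *	int ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
 *					      CORE_CTRL_ADDRESS, &core_ctrl);
 */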

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
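/* Wake/sleep requests are refcounted through keep_awake_count: only the
 * first wake asserts SOC_WAKE and only the last sleep clears it, so every
 * ath10k_pci_wake() must be balanced by an ath10k_pci_sleep(), as in the
 * register access pattern used by the diagnostic helpers above:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, address);
 *	ath10k_pci_sleep(ar);
 */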

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
			     flags);
	if (ret)
		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
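/* Example of the 50% heuristic: for the host->target WMI pipe (CE3, 32
 * src entries above) the CE register is only polled once 16 or fewer send
 * slots remain free. */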

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries)
			completions += attr->src_nentries;

		if (attr->dest_nentries)
			completions += attr->dest_nentries;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_cleanup_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}
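/* Note: for pipes flagged CE_ATTR_DIS_INTR (the host->target HTT pipe,
 * CE4, above) the send-done callback is registered with interrupts
 * disabled; such pipes are serviced by polling through
 * ath10k_pci_hif_send_complete_check() instead. */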

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
					    compl->pipe_info->pipe_num, ret);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
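/* Per the mapping above, e.g. the WMI services end up on ul_pipe 3 /
 * dl_pipe 2, and only the HTT ul_pipe (CE4) is reported as polled, since
 * it is the sole host CE configured with CE_ATTR_DIS_INTR. */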

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_alloc_compl(ar);
	if (ret) {
		ath10k_warn("failed to allocate CE completions: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_free_compl;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);
	ath10k_pci_process_ce(ar);
err_free_compl:
	ath10k_pci_cleanup_ce(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layer to free
		 * the buffer
		 */

		if (!netbuf) {
			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
				    ce_hdl->id);
			continue;
		}

		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_warm_reset(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
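/* BMI exchange in brief: the request is copied into a DMA-safe bounce
 * buffer and sent on the BMI tx pipe; if a response is expected, a second
 * bounce buffer is posted on the BMI rx pipe first. Completion is then
 * polled by ath10k_pci_bmi_wait() below rather than driven by CE
 * interrupts. */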

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (completion_done(&xfer->done))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
1657
Kalle Valo5e3dd152013-06-12 20:52:10 +03001658/*
1659 * Map from service/endpoint to Copy Engine.
1660 * This table is derived from the CE_PCI TABLE, above.
1661 * It is passed to the Target at startup for use by firmware.
1662 */
1663static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1664 {
1665 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1666 PIPEDIR_OUT, /* out = UL = host -> target */
1667 3,
1668 },
1669 {
1670 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1671 PIPEDIR_IN, /* in = DL = target -> host */
1672 2,
1673 },
1674 {
1675 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1676 PIPEDIR_OUT, /* out = UL = host -> target */
1677 3,
1678 },
1679 {
1680 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1681 PIPEDIR_IN, /* in = DL = target -> host */
1682 2,
1683 },
1684 {
1685 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1686 PIPEDIR_OUT, /* out = UL = host -> target */
1687 3,
1688 },
1689 {
1690 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1691 PIPEDIR_IN, /* in = DL = target -> host */
1692 2,
1693 },
1694 {
1695 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1696 PIPEDIR_OUT, /* out = UL = host -> target */
1697 3,
1698 },
1699 {
1700 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1701 PIPEDIR_IN, /* in = DL = target -> host */
1702 2,
1703 },
1704 {
1705 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1706 PIPEDIR_OUT, /* out = UL = host -> target */
1707 3,
1708 },
1709 {
1710 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1711 PIPEDIR_IN, /* in = DL = target -> host */
1712 2,
1713 },
1714 {
1715 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1716 PIPEDIR_OUT, /* out = UL = host -> target */
1717 0, /* could be moved to 3 (share with WMI) */
1718 },
1719 {
1720 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1721 PIPEDIR_IN, /* in = DL = target -> host */
1722 1,
1723 },
1724 {
1725 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1726 PIPEDIR_OUT, /* out = UL = host -> target */
1727 0,
1728 },
1729 {
1730 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1731 PIPEDIR_IN, /* in = DL = target -> host */
1732 1,
1733 },
1734 {
1735 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1736 PIPEDIR_OUT, /* out = UL = host -> target */
1737 4,
1738 },
1739 {
1740 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1741 PIPEDIR_IN, /* in = DL = target -> host */
1742 1,
1743 },
1744
1745 /* (Additions here) */
1746
1747 { /* Must be last */
1748 0,
1749 0,
1750 0,
1751 },
1752};
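/* Reading the map above: the WMI control and WMI data services all share
 * CE 3 for host->target and CE 2 for target->host, while HTT data uses
 * CE 4 out and CE 1 in. The all-zero sentinel entry presumably lets the
 * firmware detect the end of the list. */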
1753
1754/*
1755 * Send an interrupt to the device to wake up the Target CPU
1756 * so it has an opportunity to notice any changed state.
1757 */
1758static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1759{
1760 int ret;
1761 u32 core_ctrl;
1762
1763 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1764 CORE_CTRL_ADDRESS,
1765 &core_ctrl);
1766 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001767 ath10k_warn("failed to read core_ctrl: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001768 return ret;
1769 }
1770
1771 /* A_INUM_FIRMWARE interrupt to Target CPU */
1772 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1773
1774 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1775 CORE_CTRL_ADDRESS,
1776 core_ctrl);
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001777 if (ret) {
1778 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1779 ret);
1780 return ret;
1781 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03001782
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001783 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001784}
1785
1786static int ath10k_pci_init_config(struct ath10k *ar)
1787{
1788 u32 interconnect_targ_addr;
1789 u32 pcie_state_targ_addr = 0;
1790 u32 pipe_cfg_targ_addr = 0;
1791 u32 svc_to_pipe_map = 0;
1792 u32 pcie_config_flags = 0;
1793 u32 ealloc_value;
1794 u32 ealloc_targ_addr;
1795 u32 flag2_value;
1796 u32 flag2_targ_addr;
1797 int ret = 0;
1798
1799 /* Download to Target the CE Config and the service-to-CE map */
1800 interconnect_targ_addr =
1801 host_interest_item_address(HI_ITEM(hi_interconnect_state));
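	/* hi_interconnect_state is a field in the target's host_interest
	 * area; it holds a pointer to a struct pcie_state in target memory,
	 * which the diagnostic reads below then walk field by field. */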
1802
1803 /* Supply Target-side CE configuration */
1804 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1805 &pcie_state_targ_addr);
1806 if (ret != 0) {
1807 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1808 return ret;
1809 }
1810
1811 if (pcie_state_targ_addr == 0) {
1812 ret = -EIO;
1813 ath10k_err("Invalid pcie state addr\n");
1814 return ret;
1815 }
1816
1817 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1818 offsetof(struct pcie_state,
1819 pipe_cfg_addr),
1820 &pipe_cfg_targ_addr);
1821 if (ret != 0) {
1822 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1823 return ret;
1824 }
1825
1826 if (pipe_cfg_targ_addr == 0) {
1827 ret = -EIO;
1828 ath10k_err("Invalid pipe cfg addr\n");
1829 return ret;
1830 }
1831
1832 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1833 target_ce_config_wlan,
1834 sizeof(target_ce_config_wlan));
1835
1836 if (ret != 0) {
1837 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1838 return ret;
1839 }
1840
1841 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1842 offsetof(struct pcie_state,
1843 svc_to_pipe_map),
1844 &svc_to_pipe_map);
1845 if (ret != 0) {
1846 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1847 return ret;
1848 }
1849
1850 if (svc_to_pipe_map == 0) {
1851 ret = -EIO;
1852 ath10k_err("Invalid svc_to_pipe map\n");
1853 return ret;
1854 }
1855
1856 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1857 target_service_to_ce_map_wlan,
1858 sizeof(target_service_to_ce_map_wlan));
1859 if (ret != 0) {
1860 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1861 return ret;
1862 }
1863
1864 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1865 offsetof(struct pcie_state,
1866 config_flags),
1867 &pcie_config_flags);
1868 if (ret != 0) {
1869 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1870 return ret;
1871 }
1872
1873 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1874
1875 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1876 offsetof(struct pcie_state, config_flags),
1877 &pcie_config_flags,
1878 sizeof(pcie_config_flags));
1879 if (ret != 0) {
1880 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1881 return ret;
1882 }
1883
1884 /* configure early allocation */
1885 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1886
1887 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1888 if (ret != 0) {
1889	ath10k_err("Failed to get early alloc val: %d\n", ret);
1890 return ret;
1891 }
1892
1893 /* first bank is switched to IRAM */
1894 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1895 HI_EARLY_ALLOC_MAGIC_MASK);
1896 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1897 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1898
1899 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1900 if (ret != 0) {
1901 ath10k_err("Failed to set early alloc val: %d\n", ret);
1902 return ret;
1903 }
1904
1905 /* Tell Target to proceed with initialization */
1906 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1907
1908 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1909 if (ret != 0) {
1910 ath10k_err("Failed to get option val: %d\n", ret);
1911 return ret;
1912 }
1913
1914 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1915
1916 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1917 if (ret != 0) {
1918 ath10k_err("Failed to set option val: %d\n", ret);
1919 return ret;
1920 }
1921
1922 return 0;
1923}
1924
1927static int ath10k_pci_ce_init(struct ath10k *ar)
1928{
1929 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001930 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001931 const struct ce_attr *attr;
1932 int pipe_num;
1933
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001934 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001935 pipe_info = &ar_pci->pipe_info[pipe_num];
1936 pipe_info->pipe_num = pipe_num;
1937 pipe_info->hif_ce_state = ar;
1938 attr = &host_ce_config_wlan[pipe_num];
1939
1940 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1941 if (pipe_info->ce_hdl == NULL) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001942 ath10k_err("failed to initialize CE for pipe: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001943 pipe_num);
1944
1945			/* It is safe to call this here; it checks whether
1946			 * ce_hdl is valid for each pipe */
1947 ath10k_pci_ce_deinit(ar);
1948			return -ENOMEM;
1949 }
1950
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001951 if (pipe_num == CE_COUNT - 1) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001952 /*
1953 * Reserve the ultimate CE for
1954 * diagnostic Window support
1955 */
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001956 ar_pci->ce_diag = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001957 continue;
1958 }
1959
1960 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1961 }
1962
Kalle Valo5e3dd152013-06-12 20:52:10 +03001963 return 0;
1964}
1965
1966static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1967{
1968 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1969 u32 fw_indicator_address, fw_indicator;
1970
1971 ath10k_pci_wake(ar);
1972
1973 fw_indicator_address = ar_pci->fw_indicator_address;
1974 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1975
1976 if (fw_indicator & FW_IND_EVENT_PENDING) {
1977 /* ACK: clear Target-side pending event */
1978 ath10k_pci_write32(ar, fw_indicator_address,
1979 fw_indicator & ~FW_IND_EVENT_PENDING);
1980
1981 if (ar_pci->started) {
1982 ath10k_pci_hif_dump_area(ar);
1983 } else {
1984 /*
1985 * Probable Target failure before we're prepared
1986 * to handle it. Generally unexpected.
1987 */
1988 ath10k_warn("early firmware event indicated\n");
1989 }
1990 }
1991
1992 ath10k_pci_sleep(ar);
1993}
1994
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001995static int ath10k_pci_warm_reset(struct ath10k *ar)
1996{
1997 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1998 int ret = 0;
1999 u32 val;
2000
2001 ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
2002
2003 ret = ath10k_do_pci_wake(ar);
2004 if (ret) {
2005 ath10k_err("failed to wake up target: %d\n", ret);
2006 return ret;
2007 }
2008
2009	/* dump interrupt cause registers for debugging */
2010 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2011 PCIE_INTR_CAUSE_ADDRESS);
2012 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
2013
2014 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2015 CPU_INTR_ADDRESS);
2016 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
2017 val);
2018
2019 /* disable pending irqs */
2020 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2021 PCIE_INTR_ENABLE_ADDRESS, 0);
2022
2023 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2024 PCIE_INTR_CLR_ADDRESS, ~0);
2025
2026 msleep(100);
2027
2028 /* clear fw indicator */
2029 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
2030
2031 /* clear target LF timer interrupts */
2032 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2033 SOC_LF_TIMER_CONTROL0_ADDRESS);
2034 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2035 SOC_LF_TIMER_CONTROL0_ADDRESS,
2036 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2037
2038 /* reset CE */
2039 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2040 SOC_RESET_CONTROL_ADDRESS);
2041 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2042 val | SOC_RESET_CONTROL_CE_RST_MASK);
2043 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2044 SOC_RESET_CONTROL_ADDRESS);
2045 msleep(10);
2046
2047 /* unreset CE */
2048 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2049 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2050 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2051 SOC_RESET_CONTROL_ADDRESS);
2052 msleep(10);
2053
2054	/* dump interrupt cause registers again after the reset */
2055 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2056 PCIE_INTR_CAUSE_ADDRESS);
2057 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
2058
2059 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2060 CPU_INTR_ADDRESS);
2061 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
2062 val);
2063
2064 /* CPU warm reset */
2065 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2066 SOC_RESET_CONTROL_ADDRESS);
2067 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2068 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2069
2070 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2071 SOC_RESET_CONTROL_ADDRESS);
2072 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
2073
2074 msleep(100);
2075
2076 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
2077
2078 ath10k_do_pci_sleep(ar);
2079 return ret;
2080}
2081
2082static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
Michal Kazior8c5c5362013-07-16 09:38:50 +02002083{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002084 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002085 const char *irq_mode;
Michal Kazior8c5c5362013-07-16 09:38:50 +02002086 int ret;
2087
2088 /*
2089 * Bring the target up cleanly.
2090 *
2091 * The target may be in an undefined state with an AUX-powered Target
2092 * and a Host in WoW mode. If the Host crashes, loses power, or is
2093 * restarted (without unloading the driver) then the Target is left
2094 * (aux) powered and running. On a subsequent driver load, the Target
2095 * is in an unexpected state. We try to catch that here in order to
2096 * reset the Target and retry the probe.
2097 */
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002098 if (cold_reset)
2099 ret = ath10k_pci_cold_reset(ar);
2100 else
2101 ret = ath10k_pci_warm_reset(ar);
2102
Michal Kazior5b2589f2013-11-08 08:01:30 +01002103 if (ret) {
2104 ath10k_err("failed to reset target: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01002105 goto err;
Michal Kazior5b2589f2013-11-08 08:01:30 +01002106 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002107
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002108 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02002109 /* Force AWAKE forever */
Michal Kazior8c5c5362013-07-16 09:38:50 +02002110 ath10k_do_pci_wake(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002111
2112 ret = ath10k_pci_ce_init(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002113 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002114 ath10k_err("failed to initialize CE: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002115 goto err_ps;
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002116 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002117
Michal Kazior98563d52013-11-08 08:01:33 +01002118 ret = ath10k_ce_disable_interrupts(ar);
2119 if (ret) {
2120 ath10k_err("failed to disable CE interrupts: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002121 goto err_ce;
2122 }
2123
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002124 ret = ath10k_pci_init_irq(ar);
Michal Kazior98563d52013-11-08 08:01:33 +01002125 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002126 ath10k_err("failed to init irqs: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01002127 goto err_ce;
2128 }
2129
Michal Kaziorab977bd2013-11-25 14:06:26 +01002130 ret = ath10k_pci_request_early_irq(ar);
2131 if (ret) {
2132 ath10k_err("failed to request early irq: %d\n", ret);
2133 goto err_deinit_irq;
2134 }
2135
Michal Kazior98563d52013-11-08 08:01:33 +01002136 ret = ath10k_pci_wait_for_target_init(ar);
2137 if (ret) {
2138 ath10k_err("failed to wait for target to init: %d\n", ret);
Michal Kaziorab977bd2013-11-25 14:06:26 +01002139 goto err_free_early_irq;
Michal Kazior98563d52013-11-08 08:01:33 +01002140 }
2141
2142 ret = ath10k_pci_init_config(ar);
2143 if (ret) {
2144 ath10k_err("failed to setup init config: %d\n", ret);
Michal Kaziorab977bd2013-11-25 14:06:26 +01002145 goto err_free_early_irq;
Michal Kazior98563d52013-11-08 08:01:33 +01002146 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002147
2148 ret = ath10k_pci_wake_target_cpu(ar);
2149 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002150 ath10k_err("could not wake up target CPU: %d\n", ret);
Michal Kaziorab977bd2013-11-25 14:06:26 +01002151 goto err_free_early_irq;
Michal Kazior8c5c5362013-07-16 09:38:50 +02002152 }
2153
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002154 if (ar_pci->num_msi_intrs > 1)
2155 irq_mode = "MSI-X";
2156 else if (ar_pci->num_msi_intrs == 1)
2157 irq_mode = "MSI";
2158 else
2159 irq_mode = "legacy";
2160
Kalle Valo650b91f2013-11-20 10:00:49 +02002161 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2162 ath10k_info("pci irq %s\n", irq_mode);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002163
Michal Kazior8c5c5362013-07-16 09:38:50 +02002164 return 0;
2165
Michal Kaziorab977bd2013-11-25 14:06:26 +01002166err_free_early_irq:
2167 ath10k_pci_free_early_irq(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002168err_deinit_irq:
2169 ath10k_pci_deinit_irq(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002170err_ce:
2171 ath10k_pci_ce_deinit(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002172 ath10k_pci_warm_reset(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002173err_ps:
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002174 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02002175 ath10k_do_pci_sleep(ar);
2176err:
2177 return ret;
2178}
2179
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002180static int ath10k_pci_hif_power_up(struct ath10k *ar)
2181{
2182 int ret;
2183
2184 /*
2185 * Hardware CUS232 version 2 has some issues with cold reset and the
2186 * preferred (and safer) way to perform a device reset is through a
2187 * warm reset.
2188 *
2189 * Warm reset doesn't always work though (notably after a firmware
2190 * crash) so fall back to cold reset if necessary.
2191 */
2192 ret = __ath10k_pci_hif_power_up(ar, false);
2193 if (ret) {
2194 ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
2195 ret);
2196
2197 ret = __ath10k_pci_hif_power_up(ar, true);
2198 if (ret) {
2199 ath10k_err("failed to power up target using cold reset too (%d)\n",
2200 ret);
2201 return ret;
2202 }
2203 }
2204
2205 return 0;
2206}
2207
Michal Kazior8c5c5362013-07-16 09:38:50 +02002208static void ath10k_pci_hif_power_down(struct ath10k *ar)
2209{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002210 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2211
Michal Kaziorab977bd2013-11-25 14:06:26 +01002212 ath10k_pci_free_early_irq(ar);
2213 ath10k_pci_kill_tasklet(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002214 ath10k_pci_deinit_irq(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002215 ath10k_pci_warm_reset(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002216
Michal Kazior8c5c5362013-07-16 09:38:50 +02002217 ath10k_pci_ce_deinit(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002218 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02002219 ath10k_do_pci_sleep(ar);
2220}
2221
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002222#ifdef CONFIG_PM
2223
2224#define ATH10K_PCI_PM_CONTROL 0x44
2225
2226static int ath10k_pci_hif_suspend(struct ath10k *ar)
2227{
2228 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2229 struct pci_dev *pdev = ar_pci->pdev;
2230 u32 val;
2231
2232 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2233
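	/* The low byte of the PM control/status register holds the device
	 * power state; 0x3 is D3hot. Save state and power down only if the
	 * device isn't in D3hot already. */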
2234 if ((val & 0x000000ff) != 0x3) {
2235 pci_save_state(pdev);
2236 pci_disable_device(pdev);
2237 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2238 (val & 0xffffff00) | 0x03);
2239 }
2240
2241 return 0;
2242}
2243
2244static int ath10k_pci_hif_resume(struct ath10k *ar)
2245{
2246 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2247 struct pci_dev *pdev = ar_pci->pdev;
2248 u32 val;
2249
2250 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2251
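	/* A non-zero power state means the device isn't in D0 yet; restore
	 * config space and return it to full power. */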
2252 if ((val & 0x000000ff) != 0) {
2253 pci_restore_state(pdev);
2254 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2255 val & 0xffffff00);
2256 /*
2257 * Suspend/Resume resets the PCI configuration space,
2258 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2259 * to keep PCI Tx retries from interfering with C3 CPU state
2260 */
2261 pci_read_config_dword(pdev, 0x40, &val);
2262
2263 if ((val & 0x0000ff00) != 0)
2264 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2265 }
2266
2267 return 0;
2268}
2269#endif
2270
Kalle Valo5e3dd152013-06-12 20:52:10 +03002271static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2272 .send_head = ath10k_pci_hif_send_head,
2273 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2274 .start = ath10k_pci_hif_start,
2275 .stop = ath10k_pci_hif_stop,
2276 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2277 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2278 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03002279 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002280 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02002281 .power_up = ath10k_pci_hif_power_up,
2282 .power_down = ath10k_pci_hif_power_down,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002283#ifdef CONFIG_PM
2284 .suspend = ath10k_pci_hif_suspend,
2285 .resume = ath10k_pci_hif_resume,
2286#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03002287};
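/* This HIF vtable is handed to the core layer via ath10k_core_create()
 * in ath10k_pci_probe() below. */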
2288
2289static void ath10k_pci_ce_tasklet(unsigned long ptr)
2290{
Michal Kazior87263e52013-08-27 13:08:01 +02002291 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002292 struct ath10k_pci *ar_pci = pipe->ar_pci;
2293
2294 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2295}
2296
2297static void ath10k_msi_err_tasklet(unsigned long data)
2298{
2299 struct ath10k *ar = (struct ath10k *)data;
2300
2301 ath10k_pci_fw_interrupt_handler(ar);
2302}
2303
2304/*
2305 * Handler for a per-engine interrupt on a PARTICULAR CE.
2306 * This is used in cases where each CE has a private MSI interrupt.
2307 */
2308static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2309{
2310 struct ath10k *ar = arg;
2311 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2312 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2313
Dan Carpentere5742672013-06-18 10:28:46 +03002314 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002315 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2316 return IRQ_HANDLED;
2317 }
2318
2319 /*
2320 * NOTE: We are able to derive ce_id from irq because we
2321	 * use a one-to-one mapping for CEs 0..5.
2322	 * CEs 6 & 7 do not use interrupts at all.
2323 *
2324 * This mapping must be kept in sync with the mapping
2325 * used by firmware.
2326 */
2327 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2328 return IRQ_HANDLED;
2329}
2330
2331static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2332{
2333 struct ath10k *ar = arg;
2334 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2335
2336 tasklet_schedule(&ar_pci->msi_fw_err);
2337 return IRQ_HANDLED;
2338}
2339
2340/*
2341 * Top-level interrupt handler for all PCI interrupts from a Target.
2342 * When a block of MSI interrupts is allocated, this top-level handler
2343 * is not used; instead, we directly call the correct sub-handler.
2344 */
2345static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2346{
2347 struct ath10k *ar = arg;
2348 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2349
2350 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002351 if (!ath10k_pci_irq_pending(ar))
2352 return IRQ_NONE;
2353
Michal Kazior26852182013-11-25 14:06:25 +01002354 ath10k_pci_disable_and_clear_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002355 }
2356
2357 tasklet_schedule(&ar_pci->intr_tq);
2358
2359 return IRQ_HANDLED;
2360}
2361
Michal Kaziorab977bd2013-11-25 14:06:26 +01002362static void ath10k_pci_early_irq_tasklet(unsigned long data)
2363{
2364 struct ath10k *ar = (struct ath10k *)data;
2365 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2366 u32 fw_ind;
2367 int ret;
2368
2369 ret = ath10k_pci_wake(ar);
2370 if (ret) {
2371 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2372 ret);
2373 return;
2374 }
2375
2376 fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2377 if (fw_ind & FW_IND_EVENT_PENDING) {
2378 ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2379 fw_ind & ~FW_IND_EVENT_PENDING);
2380
2381 /* Some structures are unavailable during early boot or at
2382 * driver teardown so just print that the device has crashed. */
2383 ath10k_warn("device crashed - no diagnostics available\n");
2384 }
2385
2386 ath10k_pci_sleep(ar);
2387 ath10k_pci_enable_legacy_irq(ar);
2388}
2389
Kalle Valo5e3dd152013-06-12 20:52:10 +03002390static void ath10k_pci_tasklet(unsigned long data)
2391{
2392 struct ath10k *ar = (struct ath10k *)data;
2393 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2394
2395 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2396 ath10k_ce_per_engine_service_any(ar);
2397
Michal Kazior26852182013-11-25 14:06:25 +01002398 /* Re-enable legacy irq that was disabled in the irq handler */
2399 if (ar_pci->num_msi_intrs == 0)
2400 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002401}
2402
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002403static int ath10k_pci_request_irq_msix(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002404{
2405 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002406 int ret, i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002407
2408 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2409 ath10k_pci_msi_fw_handler,
2410 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002411 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002412 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
Michal Kazior591ecdb2013-07-31 10:55:15 +02002413 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002414 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002415 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002416
2417 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2418 ret = request_irq(ar_pci->pdev->irq + i,
2419 ath10k_pci_per_engine_handler,
2420 IRQF_SHARED, "ath10k_pci", ar);
2421 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002422 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002423 ar_pci->pdev->irq + i, ret);
2424
Michal Kazior87b14232013-06-26 08:50:50 +02002425 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2426 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002427
Michal Kazior87b14232013-06-26 08:50:50 +02002428 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002429 return ret;
2430 }
2431 }
2432
Kalle Valo5e3dd152013-06-12 20:52:10 +03002433 return 0;
2434}
2435
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002436static int ath10k_pci_request_irq_msi(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002437{
2438 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2439 int ret;
2440
2441 ret = request_irq(ar_pci->pdev->irq,
2442 ath10k_pci_interrupt_handler,
2443 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002444 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002445 ath10k_warn("failed to request MSI irq %d: %d\n",
2446 ar_pci->pdev->irq, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002447 return ret;
Kalle Valof3782742013-10-17 11:36:15 +03002448 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002449
Kalle Valo5e3dd152013-06-12 20:52:10 +03002450 return 0;
2451}
2452
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002453static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002454{
2455 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002456 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002457
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002458 ret = request_irq(ar_pci->pdev->irq,
2459 ath10k_pci_interrupt_handler,
2460 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002461 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002462 ath10k_warn("failed to request legacy irq %d: %d\n",
2463 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002464 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002465 }
2466
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002467 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002468}
2469
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002470static int ath10k_pci_request_irq(struct ath10k *ar)
2471{
2472 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2473
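	/* num_msi_intrs was picked by ath10k_pci_init_irq():
	 * 0 = legacy interrupts, 1 = single MSI, MSI_NUM_REQUEST = MSI-X. */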
2474 switch (ar_pci->num_msi_intrs) {
2475 case 0:
2476 return ath10k_pci_request_irq_legacy(ar);
2477 case 1:
2478 return ath10k_pci_request_irq_msi(ar);
2479 case MSI_NUM_REQUEST:
2480 return ath10k_pci_request_irq_msix(ar);
2481 }
2482
2483 ath10k_warn("unknown irq configuration upon request\n");
2484 return -EINVAL;
2485}
2486
2487static void ath10k_pci_free_irq(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002488{
2489 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2490 int i;
2491
2492	/* There's at least one interrupt regardless of whether it's legacy INTR,
2493	 * MSI or MSI-X */
2494 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2495 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002496}
2497
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002498static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2499{
2500 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2501 int i;
2502
2503 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2504 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2505 (unsigned long)ar);
Michal Kaziorab977bd2013-11-25 14:06:26 +01002506 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2507 (unsigned long)ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002508
2509 for (i = 0; i < CE_COUNT; i++) {
2510 ar_pci->pipe_info[i].ar_pci = ar_pci;
2511 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2512 (unsigned long)&ar_pci->pipe_info[i]);
2513 }
2514}
2515
2516static int ath10k_pci_init_irq(struct ath10k *ar)
2517{
2518 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002519 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2520 ar_pci->features);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002521 int ret;
2522
2523 ath10k_pci_init_irq_tasklets(ar);
2524
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002525 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2526 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2527 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002528
2529 /* Try MSI-X */
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002530 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2531 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002532 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2533 ar_pci->num_msi_intrs);
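		/* pci_enable_msi_range() returns the number of vectors
		 * granted (> 0) on success; requesting min == max makes
		 * the allocation all-or-nothing. */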
2534 if (ret > 0)
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002535 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002536
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002537 /* fall-through */
2538 }
2539
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002540 /* Try MSI */
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002541 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2542 ar_pci->num_msi_intrs = 1;
2543 ret = pci_enable_msi(ar_pci->pdev);
2544 if (ret == 0)
2545 return 0;
2546
2547 /* fall-through */
2548 }
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002549
2550 /* Try legacy irq
2551 *
2552 * A potential race occurs here: The CORE_BASE write
2553 * depends on target correctly decoding AXI address but
2554 * host won't know when target writes BAR to CORE_CTRL.
2555 * This write might get lost if target has NOT written BAR.
2556	 * For now, work around the race by repeating the write in the
2557	 * synchronization check below. */
2558 ar_pci->num_msi_intrs = 0;
2559
2560 ret = ath10k_pci_wake(ar);
2561 if (ret) {
2562 ath10k_warn("failed to wake target: %d\n", ret);
2563 return ret;
2564 }
2565
2566 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2567 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2568 ath10k_pci_sleep(ar);
2569
2570 return 0;
2571}
2572
2573static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2574{
2575 int ret;
2576
2577 ret = ath10k_pci_wake(ar);
2578 if (ret) {
2579 ath10k_warn("failed to wake target: %d\n", ret);
2580 return ret;
2581 }
2582
2583 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2584 0);
2585 ath10k_pci_sleep(ar);
2586
2587 return 0;
2588}
2589
2590static int ath10k_pci_deinit_irq(struct ath10k *ar)
2591{
2592 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2593
2594 switch (ar_pci->num_msi_intrs) {
2595 case 0:
2596 return ath10k_pci_deinit_irq_legacy(ar);
2597 case 1:
2598 /* fall-through */
2599 case MSI_NUM_REQUEST:
2600 pci_disable_msi(ar_pci->pdev);
2601 return 0;
Alexander Gordeevbb8b6212014-02-13 17:50:01 +02002602 default:
2603 pci_disable_msi(ar_pci->pdev);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002604 }
2605
2606 ath10k_warn("unknown irq configuration upon deinit\n");
2607 return -EINVAL;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002608}
2609
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002610static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002611{
2612 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2613 int wait_limit = 300; /* 3 sec */
Kalle Valof3782742013-10-17 11:36:15 +03002614 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002615
Michal Kazior98563d52013-11-08 08:01:33 +01002616 ret = ath10k_pci_wake(ar);
Kalle Valof3782742013-10-17 11:36:15 +03002617 if (ret) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002618 ath10k_err("failed to wake up target: %d\n", ret);
Kalle Valof3782742013-10-17 11:36:15 +03002619 return ret;
2620 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002621
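	/* Poll the shared firmware indicator register; the target sets
	 * FW_IND_INITIALIZED once its boot-time init has completed. */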
2622 while (wait_limit-- &&
2623 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2624 FW_IND_INITIALIZED)) {
2625 if (ar_pci->num_msi_intrs == 0)
2626 /* Fix potential race by repeating CORE_BASE writes */
2627 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2628 PCIE_INTR_CE_MASK_ALL,
2629 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2630 PCIE_INTR_ENABLE_ADDRESS));
2631 mdelay(10);
2632 }
2633
2634 if (wait_limit < 0) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002635 ath10k_err("target stalled\n");
2636 ret = -EIO;
2637 goto out;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002638 }
2639
Michal Kazior5b2589f2013-11-08 08:01:30 +01002640out:
Michal Kazior98563d52013-11-08 08:01:33 +01002641 ath10k_pci_sleep(ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002642 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002643}
2644
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002645static int ath10k_pci_cold_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002646{
Michal Kazior5b2589f2013-11-08 08:01:30 +01002647 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002648 u32 val;
2649
Michal Kazior5b2589f2013-11-08 08:01:30 +01002650 ret = ath10k_do_pci_wake(ar);
2651 if (ret) {
2652 ath10k_err("failed to wake up target: %d\n",
2653 ret);
2654 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002655 }
2656
2657 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002658 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002659 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002660 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002661
2662 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002663 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002664 RTC_STATE_COLD_RESET_MASK)
2665 break;
2666 msleep(1);
2667 }
2668
2669 /* Pull Target, including PCIe, out of RESET. */
2670 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002671 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002672
2673 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002674 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002675 RTC_STATE_COLD_RESET_MASK))
2676 break;
2677 msleep(1);
2678 }
2679
Michal Kazior5b2589f2013-11-08 08:01:30 +01002680 ath10k_do_pci_sleep(ar);
2681 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002682}
2683
2684static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2685{
2686 int i;
2687
2688 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2689 if (!test_bit(i, ar_pci->features))
2690 continue;
2691
2692 switch (i) {
2693 case ATH10K_PCI_FEATURE_MSI_X:
Kalle Valo24cfade2013-09-08 17:55:50 +03002694 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002695 break;
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002696 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
Kalle Valo24cfade2013-09-08 17:55:50 +03002697 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002698 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002699 }
2700 }
2701}
2702
2703static int ath10k_pci_probe(struct pci_dev *pdev,
2704 const struct pci_device_id *pci_dev)
2705{
2706 void __iomem *mem;
2707 int ret = 0;
2708 struct ath10k *ar;
2709 struct ath10k_pci *ar_pci;
Kalle Valoe01ae682013-09-01 11:22:14 +03002710 u32 lcr_val, chip_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002711
2712 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2713
2714 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2715 if (ar_pci == NULL)
2716 return -ENOMEM;
2717
2718 ar_pci->pdev = pdev;
2719 ar_pci->dev = &pdev->dev;
2720
2721 switch (pci_dev->device) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002722 case QCA988X_2_0_DEVICE_ID:
2723 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2724 break;
2725 default:
2726 ret = -ENODEV;
Masanari Iida6d3be302013-09-30 23:19:09 +09002727 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002728 goto err_ar_pci;
2729 }
2730
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002731 if (ath10k_target_ps)
2732 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2733
Kalle Valo5e3dd152013-06-12 20:52:10 +03002734 ath10k_pci_dump_features(ar_pci);
2735
Michal Kazior3a0861f2013-07-05 16:15:06 +03002736 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002737 if (!ar) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002738 ath10k_err("failed to create driver core\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002739 ret = -EINVAL;
2740 goto err_ar_pci;
2741 }
2742
Kalle Valo5e3dd152013-06-12 20:52:10 +03002743 ar_pci->ar = ar;
2744 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2745 atomic_set(&ar_pci->keep_awake_count, 0);
2746
2747 pci_set_drvdata(pdev, ar);
2748
2749 /*
2750 * Without any knowledge of the Host, the Target may have been reset or
2751 * power cycled and its Config Space may no longer reflect the PCI
2752 * address space that was assigned earlier by the PCI infrastructure.
2753 * Refresh it now.
2754 */
2755 ret = pci_assign_resource(pdev, BAR_NUM);
2756 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002757 ath10k_err("failed to assign PCI space: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002758 goto err_ar;
2759 }
2760
2761 ret = pci_enable_device(pdev);
2762 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002763 ath10k_err("failed to enable PCI device: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002764 goto err_ar;
2765 }
2766
2767 /* Request MMIO resources */
2768 ret = pci_request_region(pdev, BAR_NUM, "ath");
2769 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002770 ath10k_err("failed to request MMIO region: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002771 goto err_device;
2772 }
2773
2774 /*
2775 * Target structures have a limit of 32 bit DMA pointers.
2776 * DMA pointers can be wider than 32 bits by default on some systems.
2777 */
2778 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2779 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002780 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002781 goto err_region;
2782 }
2783
2784 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2785 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002786		ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002787 goto err_region;
2788 }
2789
2790 /* Set bus master bit in PCI_COMMAND to enable DMA */
2791 pci_set_master(pdev);
2792
2793 /*
2794 * Temporary FIX: disable ASPM
2795 * Will be removed after the OTP is programmed
2796 */
2797 pci_read_config_dword(pdev, 0x80, &lcr_val);
2798 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2799
2800 /* Arrange for access to Target SoC registers. */
2801 mem = pci_iomap(pdev, BAR_NUM, 0);
2802 if (!mem) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002803 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002804 ret = -EIO;
2805 goto err_master;
2806 }
2807
2808 ar_pci->mem = mem;
2809
2810 spin_lock_init(&ar_pci->ce_lock);
2811
Kalle Valoe01ae682013-09-01 11:22:14 +03002812 ret = ath10k_do_pci_wake(ar);
2813 if (ret) {
2814		ath10k_err("failed to wake up target to read chip id: %d\n", ret);
Wei Yongjun12eb0872013-10-30 13:24:39 +08002815 goto err_iomap;
Kalle Valoe01ae682013-09-01 11:22:14 +03002816 }
2817
Kalle Valo233eb972013-10-16 16:46:11 +03002818 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
Kalle Valoe01ae682013-09-01 11:22:14 +03002819
2820 ath10k_do_pci_sleep(ar);
2821
Kalle Valo24cfade2013-09-08 17:55:50 +03002822 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2823
Kalle Valoe01ae682013-09-01 11:22:14 +03002824 ret = ath10k_core_register(ar, chip_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002825 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002826 ath10k_err("failed to register driver core: %d\n", ret);
Michal Kazior32270b62013-08-02 09:15:47 +02002827 goto err_iomap;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002828 }
2829
2830 return 0;
2831
Kalle Valo5e3dd152013-06-12 20:52:10 +03002832err_iomap:
2833 pci_iounmap(pdev, mem);
2834err_master:
2835 pci_clear_master(pdev);
2836err_region:
2837 pci_release_region(pdev, BAR_NUM);
2838err_device:
2839 pci_disable_device(pdev);
2840err_ar:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002841 ath10k_core_destroy(ar);
2842err_ar_pci:
2843	/* free the HIF PCI state allocated in this function */
2844 kfree(ar_pci);
2845
2846 return ret;
2847}
2848
2849static void ath10k_pci_remove(struct pci_dev *pdev)
2850{
2851 struct ath10k *ar = pci_get_drvdata(pdev);
2852 struct ath10k_pci *ar_pci;
2853
2854 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2855
2856 if (!ar)
2857 return;
2858
2859 ar_pci = ath10k_pci_priv(ar);
2860
2861 if (!ar_pci)
2862 return;
2863
2864 tasklet_kill(&ar_pci->msi_fw_err);
2865
2866 ath10k_core_unregister(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002867
Kalle Valo5e3dd152013-06-12 20:52:10 +03002868 pci_iounmap(pdev, ar_pci->mem);
2869 pci_release_region(pdev, BAR_NUM);
2870 pci_clear_master(pdev);
2871 pci_disable_device(pdev);
2872
2873 ath10k_core_destroy(ar);
2874 kfree(ar_pci);
2875}
2876
Kalle Valo5e3dd152013-06-12 20:52:10 +03002877MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2878
2879static struct pci_driver ath10k_pci_driver = {
2880 .name = "ath10k_pci",
2881 .id_table = ath10k_pci_id_table,
2882 .probe = ath10k_pci_probe,
2883 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002884};
2885
2886static int __init ath10k_pci_init(void)
2887{
2888 int ret;
2889
2890 ret = pci_register_driver(&ath10k_pci_driver);
2891 if (ret)
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002892 ath10k_err("failed to register PCI driver: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002893
2894 return ret;
2895}
2896module_init(ath10k_pci_init);
2897
2898static void __exit ath10k_pci_exit(void)
2899{
2900 pci_unregister_driver(&ath10k_pci_driver);
2901}
2902
2903module_exit(ath10k_pci_exit);
2904
2905MODULE_AUTHOR("Qualcomm Atheros");
2906MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2907MODULE_LICENSE("Dual BSD/GPL");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002908MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2909MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2910MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);