/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
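
/* For example, loading the module (typically named ath10k_pci) with:
 *
 *     modprobe ath10k_pci irq_mode=1
 *
 * forces legacy (shared) interrupts instead of MSI. Both parameters are
 * 0644, so they also appear under /sys/module/.../parameters/, but they
 * are read during device setup, so runtime writes only take effect on
 * the next initialization.
 */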

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);

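/* Host-side Copy Engine attributes. A short legend for the fields below
 * (ce.h has the authoritative definitions): src_nentries/dest_nentries
 * size the send and receive rings (0 disables that direction),
 * src_sz_max caps the per-transfer buffer size, and CE_ATTR_DIS_INTR in
 * .flags suppresses completion interrupts so the pipe must be polled.
 */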
static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
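
/* NOTE: the target table above is not used directly by the host. It is
 * pushed to the firmware through the diagnostic window in
 * ath10k_pci_init_config() and must be kept consistent with
 * host_ce_config_wlan, which describes the host-side view of the same
 * copy engines.
 */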

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise interrupt can not be
         * really cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->early_irq_tasklet);

        return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
         * first interrupt from the irq vector is triggered in all cases for FW
         * indication/errors */
        ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
                          IRQF_SHARED, "ath10k_pci (early)", ar);
        if (ret) {
                ath10k_warn("failed to request early irq: %d\n", ret);
                return ret;
        }

        return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
        free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
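
/* The early irq covers the window between power-up and
 * ath10k_pci_hif_start(): only firmware indication/error interrupts are
 * expected then, so the handler just acks legacy interrupts and kicks
 * early_irq_tasklet. hif_start() frees it and installs the full per-CE
 * handlers via ath10k_pci_request_irq(); hif_stop() re-installs it on
 * the way down.
 */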
301
Kalle Valo5e3dd152013-06-12 20:52:10 +0300302/*
303 * Diagnostic read/write access is provided for startup/config/debug usage.
304 * Caller must guarantee proper alignment, when applicable, and single user
305 * at any moment.
306 */
307static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
308 int nbytes)
309{
310 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
311 int ret = 0;
312 u32 buf;
313 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
314 unsigned int id;
315 unsigned int flags;
Michal Kazior2aa39112013-08-27 13:08:02 +0200316 struct ath10k_ce_pipe *ce_diag;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300317 /* Host buffer address in CE space */
318 u32 ce_data;
319 dma_addr_t ce_data_base = 0;
320 void *data_buf = NULL;
321 int i;
322
323 /*
324 * This code cannot handle reads to non-memory space. Redirect to the
325 * register read fn but preserve the multi word read capability of
326 * this fn
327 */
328 if (address < DRAM_BASE_ADDRESS) {
329 if (!IS_ALIGNED(address, 4) ||
330 !IS_ALIGNED((unsigned long)data, 4))
331 return -EIO;
332
333 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
334 ar, address, (u32 *)data)) == 0)) {
335 nbytes -= sizeof(u32);
336 address += sizeof(u32);
337 data += sizeof(u32);
338 }
339 return ret;
340 }
341
342 ce_diag = ar_pci->ce_diag;
343
344 /*
345 * Allocate a temporary bounce buffer to hold caller's data
346 * to be DMA'ed from Target. This guarantees
347 * 1) 4-byte alignment
348 * 2) Buffer in DMA-able space
349 */
350 orig_nbytes = nbytes;
351 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
352 orig_nbytes,
353 &ce_data_base);
354
355 if (!data_buf) {
356 ret = -ENOMEM;
357 goto done;
358 }
359 memset(data_buf, 0, orig_nbytes);
360
361 remaining_bytes = orig_nbytes;
362 ce_data = ce_data_base;
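        /* Transfer in DIAG_TRANSFER_LIMIT sized chunks: each iteration
         * posts a receive slot in the bounce buffer, asks the diag CE to
         * copy from the (converted) target address and busy-waits up to
         * DIAG_ACCESS_CE_TIMEOUT_MS for the send and receive completions
         * before advancing the window. */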
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         * 1) 4-byte alignment
         * 2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
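
/* ath10k_do_pci_wake()/ath10k_do_pci_sleep() must be called in balanced
 * pairs: keep_awake_count is a refcount, the first waker forces the SoC
 * awake and verifies it via RTC_STATE, and the target is only allowed
 * to sleep again once the last user has dropped its reference.
 */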

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}
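
/* Note that both CE callbacks above only queue work: completions are
 * appended to ar_pci->compl_process and drained by
 * ath10k_pci_process_ce(), which serializes tx/rx completion handling
 * because the upper layers cannot handle the two in parallel (see the
 * comment in that function).
 */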

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries)
                        completions += attr->src_nentries;

                if (attr->dest_nentries)
                        completions += attr->dest_nentries;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_cleanup_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}
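
/* Note that CE4 (host->target HTT) was configured with CE_ATTR_DIS_INTR
 * above, so its send completions are not interrupt driven; they are
 * reaped by ath10k_pci_hif_send_complete_check() instead, and the
 * service map later in this file reports that pipe as *ul_is_polled.
 */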

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);
        tasklet_kill(&ar_pci->early_irq_tasklet);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                            compl->pipe_info->pipe_num, ret);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services. So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}
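
/* Only dest_nentries - 1 buffers are posted per pipe above, presumably
 * so that a completely full ring can still be told apart from an empty
 * one (a common ring-buffer convention); one descriptor always stays
 * vacant.
 */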

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_alloc_compl(ar);
        if (ret) {
                ath10k_warn("failed to allocate CE completions: %d\n", ret);
                goto err_early_irq;
        }

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request irqs: %d\n", ret);
                goto err_free_compl;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);
        ath10k_pci_process_ce(ar);
err_free_compl:
        ath10k_pci_cleanup_ce(ar);
err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
         * driver lifecycle. */
        ret_early = ath10k_pci_request_early_irq(ar);
        if (ret_early)
                ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

        return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to higher layer to free
                 * the buffer
                 */

                if (!netbuf) {
                        ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
                                    ce_hdl->id);
                        continue;
                }

                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);

        ret = ath10k_pci_request_early_irq(ar);
        if (ret)
                ath10k_warn("failed to re-enable early irq: %d\n", ret);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_device_reset(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
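
/* The BMI exchange above is fully synchronous: the request (and the
 * optional response buffer) are bounced through streaming DMA mappings,
 * and ath10k_pci_bmi_wait() polls both pipes until xfer.done completes
 * or BMI_COMMUNICATION_TIMEOUT_HZ expires.
 */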

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (completion_done(&xfer->done))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}
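
/* BMI runs early enough in the boot sequence that CE completion
 * interrupts are not relied upon here; the wait loop above explicitly
 * drives both pipes' completion handlers and yields with schedule()
 * between polls (hence the might_sleep() in the caller).
 */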
1656
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

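/*
 * Summary of the map above: all host->target WMI services share CE 3 and
 * all target->host WMI services share CE 2; the reserved control service
 * uses CE 0 (out) and CE 1 (in); HTT data goes out on CE 4, with responses
 * delivered on CE 1.
 */
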
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state, config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

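/*
 * Set up one host-side copy engine per pipe described in
 * host_ce_config_wlan. The last CE carries no normal traffic; it is kept
 * aside as the diagnostic window (ce_diag) that backs the diag read/write
 * accessors.
 */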
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call this here; it checks whether
			 * ce_hdl is valid for each pipe. */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t)(attr->src_sz_max);
	}

	return 0;
}

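/*
 * Handle a firmware event indication: acknowledge FW_IND_EVENT_PENDING
 * and, once the driver is fully started, dump the firmware crash area.
 * Events that arrive before that point can only be logged.
 */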
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}

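/*
 * Power-up sequence, in order: reset the target, optionally force the SoC
 * awake, initialize the copy engines, mask CE interrupts, set up host
 * interrupts plus the early-boot firmware-indicator irq, wait for the
 * target to report FW_IND_INITIALIZED, push the CE/service configuration,
 * and finally interrupt the target CPU so boot proceeds.
 */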
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_device_reset(ar);
	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_device_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_device_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

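/*
 * ATH10K_PCI_PM_CONTROL is treated here as the device's PCI power
 * management control/status register. Assuming the standard PMCSR layout,
 * the low two bits encode the power state (0 = D0, 3 = D3hot), which is
 * what suspend sets and resume clears below.
 */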
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head = ath10k_pci_hif_send_head,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CEs 0..5.
	 * CEs 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

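/*
 * MSI-X vector layout, matching the request loop below: vector
 * MSI_ASSIGN_FW carries firmware indications, and vectors
 * MSI_ASSIGN_CE_INITIAL through MSI_ASSIGN_CE_MAX carry one copy engine
 * each. On any failure all previously requested vectors are released.
 */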
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's a legacy
	 * INTR, MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}

static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}

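/*
 * Interrupt setup falls back from the richest mode: MSI-X (one vector per
 * CE plus one for firmware indications) when auto-probing on hardware that
 * supports it, then plain MSI unless legacy mode is forced, and finally
 * the shared legacy interrupt with the CORE_BASE write race workaround
 * described below. The irq_mode module parameter can pin the mode.
 */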
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
		if (ret == 0)
			return 0;
		if (ret > 0)
			pci_disable_msi(ar_pci->pdev);

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}

static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("target stalled\n");
		ret = -EIO;
		goto out;
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}

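/*
 * Reset sequence: assert the SoC global reset bit, poll RTC_STATE until
 * the chip reports cold reset, then deassert the bit and poll again until
 * the flag clears, each wait bounded by ATH_PCI_RESET_WAIT_MAX iterations.
 */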
static int ath10k_pci_device_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}

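/*
 * Probe flow: allocate the bus private struct, flag chip features from the
 * PCI device ID, create the core instance, claim and map the BAR_NUM
 * region, constrain DMA to 32 bits, read the chip id while briefly awake,
 * and hand off to ath10k_core_register(). Errors unwind in reverse order
 * via the goto chain.
 */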
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);