/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
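	/*
	 * Read out target memory in DIAG_TRANSFER_LIMIT sized chunks: post
	 * the bounce buffer as the CE destination, ask the diagnostic CE to
	 * copy from the (CE-space) target address, then poll the send and
	 * receive completions before moving on to the next chunk.
	 */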
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
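	/*
	 * Mirror of the diagnostic read path: the caller's data has been
	 * staged in the bounce buffer above, and is now pushed out in
	 * DIAG_TRANSFER_LIMIT sized chunks with the (CE-space) target
	 * address posted as the destination.
	 */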
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

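	/*
	 * Poll the RTC state until the target reports it is fully awake,
	 * backing the per-iteration delay off from 5 us towards 50 us and
	 * giving up once PCIE_WAKE_TIMEOUT worth of waiting has accumulated.
	 */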
	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
			     flags);
	if (ret)
		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

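	/*
	 * For each copy engine pipe (the diagnostic CE is skipped below),
	 * register the send/recv completion callbacks and pre-allocate one
	 * completion structure per ring entry on the pipe's free list.
	 */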
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int ret;

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_kill_tasklet(ar);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
					    compl->pipe_info->pipe_num, ret);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("failed to start CE: %d\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layer to free
		 * the buffer
		 */

		if (!netbuf) {
			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
				    ce_hdl->id);
			continue;
		}

		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_device_reset(ar);

	ar_pci->started = 0;
}

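/*
 * BMI request/response exchange over the BMI copy engine pair: the request
 * is bounced through a DMA-mapped copy, an optional response buffer is
 * posted on the receive CE, and the caller then waits (with a timeout) for
 * the completion signalled by the BMI send/recv callbacks below.
 */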
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{				/* Must be last */
		0,
		0,
		0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
				 offsetof(struct pcie_state, config_flags),
				 &pcie_config_flags,
				 sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
1742 return ret;
1743 }
1744
1745 /* first bank is switched to IRAM */
1746 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1747 HI_EARLY_ALLOC_MAGIC_MASK);
1748 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1749 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1750
1751 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1752 if (ret != 0) {
1753 ath10k_err("Failed to set early alloc val: %d\n", ret);
1754 return ret;
1755 }
1756
1757 /* Tell Target to proceed with initialization */
1758 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1759
1760 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1761 if (ret != 0) {
1762 ath10k_err("Failed to get option val: %d\n", ret);
1763 return ret;
1764 }
1765
1766 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1767
1768 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1769 if (ret != 0) {
1770 ath10k_err("Failed to set option val: %d\n", ret);
1771 return ret;
1772 }
1773
1774 return 0;
1775}
1776
1777
1778
1779static int ath10k_pci_ce_init(struct ath10k *ar)
1780{
1781 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001782 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001783 const struct ce_attr *attr;
1784 int pipe_num;
1785
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001786 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001787 pipe_info = &ar_pci->pipe_info[pipe_num];
1788 pipe_info->pipe_num = pipe_num;
1789 pipe_info->hif_ce_state = ar;
1790 attr = &host_ce_config_wlan[pipe_num];
1791
1792 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1793 if (pipe_info->ce_hdl == NULL) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001794 ath10k_err("failed to initialize CE for pipe: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001795 pipe_num);
1796
1797 /* It is safe to call it here. It checks if ce_hdl is
1798 * valid for each pipe */
1799 ath10k_pci_ce_deinit(ar);
1800 return -1;
1801 }
1802
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001803 if (pipe_num == CE_COUNT - 1) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001804 /*
1805			 * Reserve the last CE for
1806			 * diagnostic window support
1807 */
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001808 ar_pci->ce_diag = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001809 continue;
1810 }
1811
1812 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1813 }
1814
Kalle Valo5e3dd152013-06-12 20:52:10 +03001815 return 0;
1816}
1817
1818static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1819{
1820 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1821 u32 fw_indicator_address, fw_indicator;
1822
1823 ath10k_pci_wake(ar);
1824
1825 fw_indicator_address = ar_pci->fw_indicator_address;
1826 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1827
1828 if (fw_indicator & FW_IND_EVENT_PENDING) {
1829 /* ACK: clear Target-side pending event */
1830 ath10k_pci_write32(ar, fw_indicator_address,
1831 fw_indicator & ~FW_IND_EVENT_PENDING);
1832
1833 if (ar_pci->started) {
1834 ath10k_pci_hif_dump_area(ar);
1835 } else {
1836 /*
1837 * Probable Target failure before we're prepared
1838 * to handle it. Generally unexpected.
1839 */
1840 ath10k_warn("early firmware event indicated\n");
1841 }
1842 }
1843
1844 ath10k_pci_sleep(ar);
1845}
1846
Michal Kazior98563d52013-11-08 08:01:33 +01001847static void ath10k_pci_start_bmi(struct ath10k *ar)
1848{
1849 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1850 struct ath10k_pci_pipe *pipe;
1851
1852 /*
1853 * Initially, establish CE completion handlers for use with BMI.
1854 * These are overwritten with generic handlers after we exit BMI phase.
1855 */
1856 pipe = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1857 ath10k_ce_send_cb_register(pipe->ce_hdl, ath10k_pci_bmi_send_done, 0);
1858
1859 pipe = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1860 ath10k_ce_recv_cb_register(pipe->ce_hdl, ath10k_pci_bmi_recv_data);
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001861
1862 ath10k_dbg(ATH10K_DBG_BOOT, "boot start bmi\n");
Michal Kazior98563d52013-11-08 08:01:33 +01001863}
1864
Michal Kazior8c5c5362013-07-16 09:38:50 +02001865static int ath10k_pci_hif_power_up(struct ath10k *ar)
1866{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001867 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001868 const char *irq_mode;
Michal Kazior8c5c5362013-07-16 09:38:50 +02001869 int ret;
1870
1871 /*
1872 * Bring the target up cleanly.
1873 *
1874 * The target may be in an undefined state with an AUX-powered Target
1875 * and a Host in WoW mode. If the Host crashes, loses power, or is
1876 * restarted (without unloading the driver) then the Target is left
1877 * (aux) powered and running. On a subsequent driver load, the Target
1878 * is in an unexpected state. We try to catch that here in order to
1879 * reset the Target and retry the probe.
1880 */
Michal Kazior5b2589f2013-11-08 08:01:30 +01001881 ret = ath10k_pci_device_reset(ar);
1882 if (ret) {
1883 ath10k_err("failed to reset target: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01001884 goto err;
Michal Kazior5b2589f2013-11-08 08:01:30 +01001885 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001886
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001887 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02001888 /* Force AWAKE forever */
Michal Kazior8c5c5362013-07-16 09:38:50 +02001889 ath10k_do_pci_wake(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001890
1891 ret = ath10k_pci_ce_init(ar);
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001892 if (ret) {
1893 ath10k_err("failed to initialize CE: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001894 goto err_ps;
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001895 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001896
Michal Kazior98563d52013-11-08 08:01:33 +01001897 ret = ath10k_ce_disable_interrupts(ar);
1898 if (ret) {
1899 ath10k_err("failed to disable CE interrupts: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001900 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01001901 }
1902
1903 ret = ath10k_pci_start_intr(ar);
1904 if (ret) {
1905 ath10k_err("failed to start interrupt handling: %d\n", ret);
1906 goto err_ce;
1907 }
1908
1909 ret = ath10k_pci_wait_for_target_init(ar);
1910 if (ret) {
1911 ath10k_err("failed to wait for target to init: %d\n", ret);
1912 goto err_irq;
1913 }
1914
1915 ret = ath10k_ce_enable_err_irq(ar);
1916 if (ret) {
1917 ath10k_err("failed to enable CE error irq: %d\n", ret);
1918 goto err_irq;
1919 }
1920
1921 ret = ath10k_pci_init_config(ar);
1922 if (ret) {
1923 ath10k_err("failed to setup init config: %d\n", ret);
1924 goto err_irq;
1925 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001926
1927 ret = ath10k_pci_wake_target_cpu(ar);
1928 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001929 ath10k_err("could not wake up target CPU: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01001930 goto err_irq;
Michal Kazior8c5c5362013-07-16 09:38:50 +02001931 }
1932
Michal Kazior98563d52013-11-08 08:01:33 +01001933 ath10k_pci_start_bmi(ar);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001934
1935 if (ar_pci->num_msi_intrs > 1)
1936 irq_mode = "MSI-X";
1937 else if (ar_pci->num_msi_intrs == 1)
1938 irq_mode = "MSI";
1939 else
1940 irq_mode = "legacy";
1941
Kalle Valo650b91f2013-11-20 10:00:49 +02001942 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1943 ath10k_info("pci irq %s\n", irq_mode);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001944
Michal Kazior8c5c5362013-07-16 09:38:50 +02001945 return 0;
1946
Michal Kazior98563d52013-11-08 08:01:33 +01001947err_irq:
1948 ath10k_ce_disable_interrupts(ar);
1949 ath10k_pci_stop_intr(ar);
1950 ath10k_pci_kill_tasklet(ar);
Michal Kazior6a42a472013-11-08 08:01:35 +01001951 ath10k_pci_device_reset(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001952err_ce:
1953 ath10k_pci_ce_deinit(ar);
1954err_ps:
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001955 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02001956 ath10k_do_pci_sleep(ar);
1957err:
1958 return ret;
1959}
1960
1961static void ath10k_pci_hif_power_down(struct ath10k *ar)
1962{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001963 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1964
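	/* Tear down in reverse order of hif_power_up: stop interrupt
	 * handling, reset the target, release CE state and drop the
	 * forced-awake reference taken when SoC power save is disabled. */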
Michal Kazior32270b62013-08-02 09:15:47 +02001965 ath10k_pci_stop_intr(ar);
Michal Kazior6a42a472013-11-08 08:01:35 +01001966 ath10k_pci_device_reset(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001967
Michal Kazior8c5c5362013-07-16 09:38:50 +02001968 ath10k_pci_ce_deinit(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001969 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02001970 ath10k_do_pci_sleep(ar);
1971}
1972
Michal Kazior8cd13ca2013-07-16 09:38:54 +02001973#ifdef CONFIG_PM
1974
1975#define ATH10K_PCI_PM_CONTROL 0x44
1976
1977static int ath10k_pci_hif_suspend(struct ath10k *ar)
1978{
1979 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1980 struct pci_dev *pdev = ar_pci->pdev;
1981 u32 val;
1982
1983 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1984
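	/* The low byte of the PM control register holds the power state.
	 * If the device is not already in D3hot (0x3), save its config
	 * space, disable it and request D3hot. */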
1985 if ((val & 0x000000ff) != 0x3) {
1986 pci_save_state(pdev);
1987 pci_disable_device(pdev);
1988 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1989 (val & 0xffffff00) | 0x03);
1990 }
1991
1992 return 0;
1993}
1994
1995static int ath10k_pci_hif_resume(struct ath10k *ar)
1996{
1997 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1998 struct pci_dev *pdev = ar_pci->pdev;
1999 u32 val;
2000
2001 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2002
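	/* A non-zero power state means suspend left the device in a
	 * low-power state; restore config space and put it back into D0. */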
2003 if ((val & 0x000000ff) != 0) {
2004 pci_restore_state(pdev);
2005 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2006 val & 0xffffff00);
2007 /*
2008 * Suspend/Resume resets the PCI configuration space,
2009 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2010 * to keep PCI Tx retries from interfering with C3 CPU state
2011 */
2012 pci_read_config_dword(pdev, 0x40, &val);
2013
2014 if ((val & 0x0000ff00) != 0)
2015 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2016 }
2017
2018 return 0;
2019}
2020#endif
2021
Kalle Valo5e3dd152013-06-12 20:52:10 +03002022static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2023 .send_head = ath10k_pci_hif_send_head,
2024 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2025 .start = ath10k_pci_hif_start,
2026 .stop = ath10k_pci_hif_stop,
2027 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2028 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2029 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03002030 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002031 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02002032 .power_up = ath10k_pci_hif_power_up,
2033 .power_down = ath10k_pci_hif_power_down,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002034#ifdef CONFIG_PM
2035 .suspend = ath10k_pci_hif_suspend,
2036 .resume = ath10k_pci_hif_resume,
2037#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03002038};
2039
2040static void ath10k_pci_ce_tasklet(unsigned long ptr)
2041{
Michal Kazior87263e52013-08-27 13:08:01 +02002042 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002043 struct ath10k_pci *ar_pci = pipe->ar_pci;
2044
2045 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2046}
2047
2048static void ath10k_msi_err_tasklet(unsigned long data)
2049{
2050 struct ath10k *ar = (struct ath10k *)data;
2051
2052 ath10k_pci_fw_interrupt_handler(ar);
2053}
2054
2055/*
2056 * Handler for a per-engine interrupt on a PARTICULAR CE.
2057 * This is used in cases where each CE has a private MSI interrupt.
2058 */
2059static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2060{
2061 struct ath10k *ar = arg;
2062 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2063 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2064
Dan Carpentere5742672013-06-18 10:28:46 +03002065 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002066 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2067 return IRQ_HANDLED;
2068 }
2069
2070 /*
2071 * NOTE: We are able to derive ce_id from irq because we
2072	 * use a one-to-one mapping for CEs 0..5.
2073	 * CEs 6 & 7 do not use interrupts at all.
2074 *
2075 * This mapping must be kept in sync with the mapping
2076 * used by firmware.
2077 */
2078 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2079 return IRQ_HANDLED;
2080}
2081
2082static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2083{
2084 struct ath10k *ar = arg;
2085 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2086
2087 tasklet_schedule(&ar_pci->msi_fw_err);
2088 return IRQ_HANDLED;
2089}
2090
2091/*
2092 * Top-level interrupt handler for all PCI interrupts from a Target.
2093 * When a block of MSI interrupts is allocated, this top-level handler
2094 * is not used; instead, we directly call the correct sub-handler.
2095 */
2096static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2097{
2098 struct ath10k *ar = arg;
2099 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2100
2101 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002102 if (!ath10k_pci_irq_pending(ar))
2103 return IRQ_NONE;
2104
Kalle Valo5e3dd152013-06-12 20:52:10 +03002105 /*
2106			 * IMPORTANT: INTR_CLR register has to be set after
2107			 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
2108			 * really be cleared.
2109 */
2110 iowrite32(0, ar_pci->mem +
2111 (SOC_CORE_BASE_ADDRESS |
2112 PCIE_INTR_ENABLE_ADDRESS));
2113 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2114 PCIE_INTR_CE_MASK_ALL,
2115 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2116 PCIE_INTR_CLR_ADDRESS));
2117 /*
2118 * IMPORTANT: this extra read transaction is required to
2119 * flush the posted write buffer.
2120 */
2121 (void) ioread32(ar_pci->mem +
2122 (SOC_CORE_BASE_ADDRESS |
2123 PCIE_INTR_ENABLE_ADDRESS));
2124 }
2125
2126 tasklet_schedule(&ar_pci->intr_tq);
2127
2128 return IRQ_HANDLED;
2129}
2130
2131static void ath10k_pci_tasklet(unsigned long data)
2132{
2133 struct ath10k *ar = (struct ath10k *)data;
2134 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2135
2136 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2137 ath10k_ce_per_engine_service_any(ar);
2138
2139 if (ar_pci->num_msi_intrs == 0) {
2140 /* Enable Legacy PCI line interrupts */
2141 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2142 PCIE_INTR_CE_MASK_ALL,
2143 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2144 PCIE_INTR_ENABLE_ADDRESS));
2145 /*
2146 * IMPORTANT: this extra read transaction is required to
2147 * flush the posted write buffer
2148 */
2149 (void) ioread32(ar_pci->mem +
2150 (SOC_CORE_BASE_ADDRESS |
2151 PCIE_INTR_ENABLE_ADDRESS));
2152 }
2153}
2154
2155static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2156{
2157 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2158 int ret;
2159 int i;
2160
2161 ret = pci_enable_msi_block(ar_pci->pdev, num);
2162 if (ret)
2163 return ret;
2164
2165 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2166 ath10k_pci_msi_fw_handler,
2167 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002168 if (ret) {
2169 ath10k_warn("request_irq(%d) failed %d\n",
2170 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2171
2172 pci_disable_msi(ar_pci->pdev);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002173 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002174 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002175
2176 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2177 ret = request_irq(ar_pci->pdev->irq + i,
2178 ath10k_pci_per_engine_handler,
2179 IRQF_SHARED, "ath10k_pci", ar);
2180 if (ret) {
2181 ath10k_warn("request_irq(%d) failed %d\n",
2182 ar_pci->pdev->irq + i, ret);
2183
Michal Kazior87b14232013-06-26 08:50:50 +02002184 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2185 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002186
Michal Kazior87b14232013-06-26 08:50:50 +02002187 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002188 pci_disable_msi(ar_pci->pdev);
2189 return ret;
2190 }
2191 }
2192
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002193 ath10k_dbg(ATH10K_DBG_BOOT,
2194 "MSI-X interrupt handling (%d intrs)\n", num);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002195 return 0;
2196}
2197
2198static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2199{
2200 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2201 int ret;
2202
2203 ret = pci_enable_msi(ar_pci->pdev);
2204 if (ret < 0)
2205 return ret;
2206
2207 ret = request_irq(ar_pci->pdev->irq,
2208 ath10k_pci_interrupt_handler,
2209 IRQF_SHARED, "ath10k_pci", ar);
2210 if (ret < 0) {
2211 pci_disable_msi(ar_pci->pdev);
2212 return ret;
2213 }
2214
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002215 ath10k_dbg(ATH10K_DBG_BOOT, "MSI interrupt handling\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002216 return 0;
2217}
2218
2219static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2220{
2221 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2222 int ret;
2223
2224 ret = request_irq(ar_pci->pdev->irq,
2225 ath10k_pci_interrupt_handler,
2226 IRQF_SHARED, "ath10k_pci", ar);
2227 if (ret < 0)
2228 return ret;
2229
Michal Kazior98563d52013-11-08 08:01:33 +01002230 ret = ath10k_pci_wake(ar);
Kalle Valof3782742013-10-17 11:36:15 +03002231 if (ret) {
Kalle Valof3782742013-10-17 11:36:15 +03002232 free_irq(ar_pci->pdev->irq, ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002233 ath10k_err("failed to wake up target: %d\n", ret);
Kalle Valof3782742013-10-17 11:36:15 +03002234 return ret;
2235 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002236
2237 /*
2238 * A potential race occurs here: The CORE_BASE write
2239 * depends on target correctly decoding AXI address but
2240 * host won't know when target writes BAR to CORE_CTRL.
2241 * This write might get lost if target has NOT written BAR.
2242 * For now, fix the race by repeating the write in below
2243 * synchronization checking.
2244 */
2245 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2246 PCIE_INTR_CE_MASK_ALL,
2247 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2248 PCIE_INTR_ENABLE_ADDRESS));
Kalle Valo5e3dd152013-06-12 20:52:10 +03002249
Michal Kazior98563d52013-11-08 08:01:33 +01002250 ath10k_pci_sleep(ar);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002251 ath10k_dbg(ATH10K_DBG_BOOT, "legacy interrupt handling\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002252 return 0;
2253}
2254
2255static int ath10k_pci_start_intr(struct ath10k *ar)
2256{
2257 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2258 int num = MSI_NUM_REQUEST;
2259 int ret;
2260 int i;
2261
2262 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2263 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2264 (unsigned long) ar);
2265
2266 for (i = 0; i < CE_COUNT; i++) {
2267 ar_pci->pipe_info[i].ar_pci = ar_pci;
2268 tasklet_init(&ar_pci->pipe_info[i].intr,
2269 ath10k_pci_ce_tasklet,
2270 (unsigned long)&ar_pci->pipe_info[i]);
2271 }
2272
2273 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2274 num = 1;
2275
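	/* Try MSI-X first, fall back to a single MSI and finally to the
	 * shared legacy interrupt line if neither can be enabled. */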
2276 if (num > 1) {
2277 ret = ath10k_pci_start_intr_msix(ar, num);
2278 if (ret == 0)
2279 goto exit;
2280
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002281 ath10k_dbg(ATH10K_DBG_BOOT,
2282 "MSI-X didn't succeed (%d), trying MSI\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002283 num = 1;
2284 }
2285
2286 if (num == 1) {
2287 ret = ath10k_pci_start_intr_msi(ar);
2288 if (ret == 0)
2289 goto exit;
2290
Kalle Valo95cbb6a2013-11-20 10:00:35 +02002291 ath10k_dbg(ATH10K_DBG_BOOT,
2292 "MSI didn't succeed (%d), trying legacy INTR\n",
2293 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002294 num = 0;
2295 }
2296
2297 ret = ath10k_pci_start_intr_legacy(ar);
Kalle Valof3782742013-10-17 11:36:15 +03002298 if (ret) {
2299 ath10k_warn("Failed to start legacy interrupts: %d\n", ret);
2300 return ret;
2301 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002302
2303exit:
2304 ar_pci->num_msi_intrs = num;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002305 return ret;
2306}
2307
2308static void ath10k_pci_stop_intr(struct ath10k *ar)
2309{
2310 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2311 int i;
2312
2313	/* There's at least one interrupt regardless of whether it's legacy INTR,
2314	 * MSI or MSI-X */
2315 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2316 free_irq(ar_pci->pdev->irq + i, ar);
2317
2318 if (ar_pci->num_msi_intrs > 0)
2319 pci_disable_msi(ar_pci->pdev);
2320}
2321
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002322static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002323{
2324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2325 int wait_limit = 300; /* 3 sec */
Kalle Valof3782742013-10-17 11:36:15 +03002326 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002327
Michal Kazior98563d52013-11-08 08:01:33 +01002328 ret = ath10k_pci_wake(ar);
Kalle Valof3782742013-10-17 11:36:15 +03002329 if (ret) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002330 ath10k_err("failed to wake up target: %d\n", ret);
Kalle Valof3782742013-10-17 11:36:15 +03002331 return ret;
2332 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002333
2334 while (wait_limit-- &&
2335 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2336 FW_IND_INITIALIZED)) {
2337 if (ar_pci->num_msi_intrs == 0)
2338 /* Fix potential race by repeating CORE_BASE writes */
2339 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2340 PCIE_INTR_CE_MASK_ALL,
2341 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2342 PCIE_INTR_ENABLE_ADDRESS));
2343 mdelay(10);
2344 }
2345
2346 if (wait_limit < 0) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002347 ath10k_err("target stalled\n");
2348 ret = -EIO;
2349 goto out;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002350 }
2351
Michal Kazior5b2589f2013-11-08 08:01:30 +01002352out:
Michal Kazior98563d52013-11-08 08:01:33 +01002353 ath10k_pci_sleep(ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002354 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002355}
2356
Michal Kazior5b2589f2013-11-08 08:01:30 +01002357static int ath10k_pci_device_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002358{
Michal Kazior5b2589f2013-11-08 08:01:30 +01002359 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002360 u32 val;
2361
Michal Kazior5b2589f2013-11-08 08:01:30 +01002362 ret = ath10k_do_pci_wake(ar);
2363 if (ret) {
2364 ath10k_err("failed to wake up target: %d\n",
2365 ret);
2366 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002367 }
2368
2369 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002370 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002371 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002372 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002373
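	/* Poll RTC_STATE (roughly 1 ms per iteration) until the cold reset
	 * bit is reported as set. */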
2374 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002375 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002376 RTC_STATE_COLD_RESET_MASK)
2377 break;
2378 msleep(1);
2379 }
2380
2381 /* Pull Target, including PCIe, out of RESET. */
2382 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002383 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002384
2385 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002386 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002387 RTC_STATE_COLD_RESET_MASK))
2388 break;
2389 msleep(1);
2390 }
2391
Michal Kazior5b2589f2013-11-08 08:01:30 +01002392 ath10k_do_pci_sleep(ar);
2393 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002394}
2395
2396static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2397{
2398 int i;
2399
2400 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2401 if (!test_bit(i, ar_pci->features))
2402 continue;
2403
2404 switch (i) {
2405 case ATH10K_PCI_FEATURE_MSI_X:
Kalle Valo24cfade2013-09-08 17:55:50 +03002406 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002407 break;
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002408 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
Kalle Valo24cfade2013-09-08 17:55:50 +03002409 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002410 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002411 }
2412 }
2413}
2414
2415static int ath10k_pci_probe(struct pci_dev *pdev,
2416 const struct pci_device_id *pci_dev)
2417{
2418 void __iomem *mem;
2419 int ret = 0;
2420 struct ath10k *ar;
2421 struct ath10k_pci *ar_pci;
Kalle Valoe01ae682013-09-01 11:22:14 +03002422 u32 lcr_val, chip_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002423
2424 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2425
2426 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2427 if (ar_pci == NULL)
2428 return -ENOMEM;
2429
2430 ar_pci->pdev = pdev;
2431 ar_pci->dev = &pdev->dev;
2432
2433 switch (pci_dev->device) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002434 case QCA988X_2_0_DEVICE_ID:
2435 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2436 break;
2437 default:
2438 ret = -ENODEV;
2439		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2440 goto err_ar_pci;
2441 }
2442
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002443 if (ath10k_target_ps)
2444 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2445
Kalle Valo5e3dd152013-06-12 20:52:10 +03002446 ath10k_pci_dump_features(ar_pci);
2447
Michal Kazior3a0861f2013-07-05 16:15:06 +03002448 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002449 if (!ar) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002450 ath10k_err("failed to create driver core\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002451 ret = -EINVAL;
2452 goto err_ar_pci;
2453 }
2454
Kalle Valo5e3dd152013-06-12 20:52:10 +03002455 ar_pci->ar = ar;
2456 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2457 atomic_set(&ar_pci->keep_awake_count, 0);
2458
2459 pci_set_drvdata(pdev, ar);
2460
2461 /*
2462 * Without any knowledge of the Host, the Target may have been reset or
2463 * power cycled and its Config Space may no longer reflect the PCI
2464 * address space that was assigned earlier by the PCI infrastructure.
2465 * Refresh it now.
2466 */
2467 ret = pci_assign_resource(pdev, BAR_NUM);
2468 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002469 ath10k_err("failed to assign PCI space: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002470 goto err_ar;
2471 }
2472
2473 ret = pci_enable_device(pdev);
2474 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002475 ath10k_err("failed to enable PCI device: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002476 goto err_ar;
2477 }
2478
2479 /* Request MMIO resources */
2480 ret = pci_request_region(pdev, BAR_NUM, "ath");
2481 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002482 ath10k_err("failed to request MMIO region: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002483 goto err_device;
2484 }
2485
2486 /*
2487 * Target structures have a limit of 32 bit DMA pointers.
2488 * DMA pointers can be wider than 32 bits by default on some systems.
2489 */
2490 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2491 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002492 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002493 goto err_region;
2494 }
2495
2496 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2497 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002498		ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002499 goto err_region;
2500 }
2501
2502 /* Set bus master bit in PCI_COMMAND to enable DMA */
2503 pci_set_master(pdev);
2504
2505 /*
2506 * Temporary FIX: disable ASPM
2507 * Will be removed after the OTP is programmed
2508 */
2509 pci_read_config_dword(pdev, 0x80, &lcr_val);
2510 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2511
2512 /* Arrange for access to Target SoC registers. */
2513 mem = pci_iomap(pdev, BAR_NUM, 0);
2514 if (!mem) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002515 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002516 ret = -EIO;
2517 goto err_master;
2518 }
2519
2520 ar_pci->mem = mem;
2521
2522 spin_lock_init(&ar_pci->ce_lock);
2523
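	/* Wake the target just long enough to read the chip id from the
	 * SoC register space, then let it sleep again. */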
Kalle Valoe01ae682013-09-01 11:22:14 +03002524 ret = ath10k_do_pci_wake(ar);
2525 if (ret) {
2526		ath10k_err("failed to wake up target to read chip id: %d\n", ret);
Wei Yongjun12eb0872013-10-30 13:24:39 +08002527 goto err_iomap;
Kalle Valoe01ae682013-09-01 11:22:14 +03002528 }
2529
Kalle Valo233eb972013-10-16 16:46:11 +03002530 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
Kalle Valoe01ae682013-09-01 11:22:14 +03002531
2532 ath10k_do_pci_sleep(ar);
2533
Kalle Valo24cfade2013-09-08 17:55:50 +03002534 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2535
Kalle Valoe01ae682013-09-01 11:22:14 +03002536 ret = ath10k_core_register(ar, chip_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002537 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002538 ath10k_err("failed to register driver core: %d\n", ret);
Michal Kazior32270b62013-08-02 09:15:47 +02002539 goto err_iomap;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002540 }
2541
2542 return 0;
2543
Kalle Valo5e3dd152013-06-12 20:52:10 +03002544err_iomap:
2545 pci_iounmap(pdev, mem);
2546err_master:
2547 pci_clear_master(pdev);
2548err_region:
2549 pci_release_region(pdev, BAR_NUM);
2550err_device:
2551 pci_disable_device(pdev);
2552err_ar:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002553 ath10k_core_destroy(ar);
2554err_ar_pci:
2555 /* call HIF PCI free here */
2556 kfree(ar_pci);
2557
2558 return ret;
2559}
2560
2561static void ath10k_pci_remove(struct pci_dev *pdev)
2562{
2563 struct ath10k *ar = pci_get_drvdata(pdev);
2564 struct ath10k_pci *ar_pci;
2565
2566 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2567
2568 if (!ar)
2569 return;
2570
2571 ar_pci = ath10k_pci_priv(ar);
2572
2573 if (!ar_pci)
2574 return;
2575
2576 tasklet_kill(&ar_pci->msi_fw_err);
2577
2578 ath10k_core_unregister(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002579
Kalle Valo5e3dd152013-06-12 20:52:10 +03002580 pci_iounmap(pdev, ar_pci->mem);
2581 pci_release_region(pdev, BAR_NUM);
2582 pci_clear_master(pdev);
2583 pci_disable_device(pdev);
2584
2585 ath10k_core_destroy(ar);
2586 kfree(ar_pci);
2587}
2588
Kalle Valo5e3dd152013-06-12 20:52:10 +03002589MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2590
2591static struct pci_driver ath10k_pci_driver = {
2592 .name = "ath10k_pci",
2593 .id_table = ath10k_pci_id_table,
2594 .probe = ath10k_pci_probe,
2595 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002596};
2597
2598static int __init ath10k_pci_init(void)
2599{
2600 int ret;
2601
2602 ret = pci_register_driver(&ath10k_pci_driver);
2603 if (ret)
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002604 ath10k_err("failed to register PCI driver: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002605
2606 return ret;
2607}
2608module_init(ath10k_pci_init);
2609
2610static void __exit ath10k_pci_exit(void)
2611{
2612 pci_unregister_driver(&ath10k_pci_driver);
2613}
2614
2615module_exit(ath10k_pci_exit);
2616
2617MODULE_AUTHOR("Qualcomm Atheros");
2618MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2619MODULE_LICENSE("Dual BSD/GPL");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002620MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2621MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2622MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);