1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
22
23#include "core.h"
24#include "debug.h"
25
26#include "targaddrs.h"
27#include "bmi.h"
28
29#include "hif.h"
30#include "htc.h"
31
32#include "ce.h"
33#include "pci.h"
34
35unsigned int ath10k_target_ps;
36module_param(ath10k_target_ps, uint, 0644);
37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38
39#define QCA988X_1_0_DEVICE_ID (0xabcd)
40#define QCA988X_2_0_DEVICE_ID (0x003c)
41
42static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45 {0}
46};
47
48static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
49 u32 *data);
50
51static void ath10k_pci_process_ce(struct ath10k *ar);
52static int ath10k_pci_post_rx(struct ath10k *ar);
53static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54 int num);
55static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56static void ath10k_pci_stop_ce(struct ath10k *ar);
57static void ath10k_pci_device_reset(struct ath10k *ar);
58static int ath10k_pci_reset_target(struct ath10k *ar);
59
60static const struct ce_attr host_ce_config_wlan[] = {
61 /* host->target HTC control and raw streams */
62 { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
63 /* could be moved to share CE3 */
64 /* target->host HTT + HTC control */
65 { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
66 /* target->host WMI */
67 { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
68 /* host->target WMI */
69 { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
70 /* host->target HTT */
71 { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
72 CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
73 /* unused */
74 { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
75 /* Target autonomous hif_memcpy */
76 { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
77 /* ce_diag, the Diagnostic Window */
78 { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
79};
80
81/* Target firmware's Copy Engine configuration. */
82static const struct ce_pipe_config target_ce_config_wlan[] = {
83 /* host->target HTC control and raw streams */
84 { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
85 /* target->host HTT + HTC control */
86 { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
87 /* target->host WMI */
88 { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
89 /* host->target WMI */
90 { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
91 /* host->target HTT */
92 { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
93 /* NB: 50% of src nentries, since tx has 2 frags */
94 /* unused */
95 { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
96 /* Reserved for target autonomous hif_memcpy */
97 { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
98 /* CE7 used only by Host */
99};
100
101/*
102 * Diagnostic read/write access is provided for startup/config/debug usage.
103 * Caller must guarantee proper alignment, when applicable, and that there
104 * is only a single user at any moment.
105 */
106static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
107 int nbytes)
108{
109 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
110 int ret = 0;
111 u32 buf;
112 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
113 unsigned int id;
114 unsigned int flags;
115 struct ce_state *ce_diag;
116 /* Host buffer address in CE space */
117 u32 ce_data;
118 dma_addr_t ce_data_base = 0;
119 void *data_buf = NULL;
120 int i;
121
122 /*
123 * This code cannot handle reads to non-memory space. Redirect to the
124 * register read fn but preserve the multi-word read capability of
125 * this fn.
126 */
127 if (address < DRAM_BASE_ADDRESS) {
128 if (!IS_ALIGNED(address, 4) ||
129 !IS_ALIGNED((unsigned long)data, 4))
130 return -EIO;
131
132 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
133 ar, address, (u32 *)data)) == 0)) {
134 nbytes -= sizeof(u32);
135 address += sizeof(u32);
136 data += sizeof(u32);
137 }
138 return ret;
139 }
140
141 ce_diag = ar_pci->ce_diag;
142
143 /*
144 * Allocate a temporary bounce buffer to hold caller's data
145 * to be DMA'ed from Target. This guarantees
146 * 1) 4-byte alignment
147 * 2) Buffer in DMA-able space
148 */
149 orig_nbytes = nbytes;
150 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
151 orig_nbytes,
152 &ce_data_base);
153
154 if (!data_buf) {
155 ret = -ENOMEM;
156 goto done;
157 }
158 memset(data_buf, 0, orig_nbytes);
159
160 remaining_bytes = orig_nbytes;
161 ce_data = ce_data_base;
162 while (remaining_bytes) {
163 nbytes = min_t(unsigned int, remaining_bytes,
164 DIAG_TRANSFER_LIMIT);
165
166 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
167 if (ret != 0)
168 goto done;
169
170 /* Request CE to send from Target(!) address to Host buffer */
171 /*
172 * The address supplied by the caller is in the
173 * Target CPU virtual address space.
174 *
175 * In order to use this address with the diagnostic CE,
176 * convert it from Target CPU virtual address space
177 * to CE address space
178 */
179 ath10k_pci_wake(ar);
180 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
181 address);
182 ath10k_pci_sleep(ar);
183
184 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
185 0);
186 if (ret)
187 goto done;
188
189 i = 0;
190 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
191 &completed_nbytes,
192 &id) != 0) {
193 mdelay(1);
194 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
195 ret = -EBUSY;
196 goto done;
197 }
198 }
199
200 if (nbytes != completed_nbytes) {
201 ret = -EIO;
202 goto done;
203 }
204
205 if (buf != (u32) address) {
206 ret = -EIO;
207 goto done;
208 }
209
210 i = 0;
211 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
212 &completed_nbytes,
213 &id, &flags) != 0) {
214 mdelay(1);
215
216 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
217 ret = -EBUSY;
218 goto done;
219 }
220 }
221
222 if (nbytes != completed_nbytes) {
223 ret = -EIO;
224 goto done;
225 }
226
227 if (buf != ce_data) {
228 ret = -EIO;
229 goto done;
230 }
231
232 remaining_bytes -= nbytes;
233 address += nbytes;
234 ce_data += nbytes;
235 }
236
237done:
238 if (ret == 0) {
239 /* Copy data from allocated DMA buf to caller's buf */
240 WARN_ON_ONCE(orig_nbytes & 3);
241 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
242 ((u32 *)data)[i] =
243 __le32_to_cpu(((__le32 *)data_buf)[i]);
244 }
245 } else
246 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
247 __func__, address);
248
249 if (data_buf)
250 pci_free_consistent(ar_pci->pdev, orig_nbytes,
251 data_buf, ce_data_base);
252
253 return ret;
254}
255
256/* Read 4-byte aligned data from Target memory or register */
257static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
258 u32 *data)
259{
260 /* Assume range doesn't cross this boundary */
261 if (address >= DRAM_BASE_ADDRESS)
262 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
263
264 ath10k_pci_wake(ar);
265 *data = ath10k_pci_read32(ar, address);
266 ath10k_pci_sleep(ar);
267 return 0;
268}
269
270static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
271 const void *data, int nbytes)
272{
273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
274 int ret = 0;
275 u32 buf;
276 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
277 unsigned int id;
278 unsigned int flags;
279 struct ce_state *ce_diag;
280 void *data_buf = NULL;
281 u32 ce_data; /* Host buffer address in CE space */
282 dma_addr_t ce_data_base = 0;
283 int i;
284
285 ce_diag = ar_pci->ce_diag;
286
287 /*
288 * Allocate a temporary bounce buffer to hold caller's data
289 * to be DMA'ed to Target. This guarantees
290 * 1) 4-byte alignment
291 * 2) Buffer in DMA-able space
292 */
293 orig_nbytes = nbytes;
294 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
295 orig_nbytes,
296 &ce_data_base);
297 if (!data_buf) {
298 ret = -ENOMEM;
299 goto done;
300 }
301
302 /* Copy caller's data to allocated DMA buf */
303 WARN_ON_ONCE(orig_nbytes & 3);
304 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
305 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
306
307 /*
308 * The address supplied by the caller is in the
309 * Target CPU virtual address space.
310 *
311 * In order to use this address with the diagnostic CE,
312 * convert it from
313 * Target CPU virtual address space
314 * to
315 * CE address space
316 */
317 ath10k_pci_wake(ar);
318 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
319 ath10k_pci_sleep(ar);
320
321 remaining_bytes = orig_nbytes;
322 ce_data = ce_data_base;
323 while (remaining_bytes) {
324 /* FIXME: check cast */
325 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
326
327 /* Set up to receive directly into Target(!) address */
328 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
329 if (ret != 0)
330 goto done;
331
332 /*
333 * Request CE to send caller-supplied data that
334 * was copied to bounce buffer to Target(!) address.
335 */
336 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
337 nbytes, 0, 0);
338 if (ret != 0)
339 goto done;
340
341 i = 0;
342 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
343 &completed_nbytes,
344 &id) != 0) {
345 mdelay(1);
346
347 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
348 ret = -EBUSY;
349 goto done;
350 }
351 }
352
353 if (nbytes != completed_nbytes) {
354 ret = -EIO;
355 goto done;
356 }
357
358 if (buf != ce_data) {
359 ret = -EIO;
360 goto done;
361 }
362
363 i = 0;
364 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
365 &completed_nbytes,
366 &id, &flags) != 0) {
367 mdelay(1);
368
369 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
370 ret = -EBUSY;
371 goto done;
372 }
373 }
374
375 if (nbytes != completed_nbytes) {
376 ret = -EIO;
377 goto done;
378 }
379
380 if (buf != address) {
381 ret = -EIO;
382 goto done;
383 }
384
385 remaining_bytes -= nbytes;
386 address += nbytes;
387 ce_data += nbytes;
388 }
389
390done:
391 if (data_buf) {
392 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
393 ce_data_base);
394 }
395
396 if (ret != 0)
397 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
398 address);
399
400 return ret;
401}
402
403/* Write 4B data to Target memory or register */
404static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
405 u32 data)
406{
407 /* Assume range doesn't cross this boundary */
408 if (address >= DRAM_BASE_ADDRESS)
409 return ath10k_pci_diag_write_mem(ar, address, &data,
410 sizeof(u32));
411
412 ath10k_pci_wake(ar);
413 ath10k_pci_write32(ar, address, data);
414 ath10k_pci_sleep(ar);
415 return 0;
416}
417
418static bool ath10k_pci_target_is_awake(struct ath10k *ar)
419{
420 void __iomem *mem = ath10k_pci_priv(ar)->mem;
421 u32 val;
422 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
423 RTC_STATE_ADDRESS);
424 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
425}
426
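/* Poll for up to roughly one second (100 attempts x 10 ms) for the target
 * to report that it is awake, and warn if it never does. */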
427static void ath10k_pci_wait(struct ath10k *ar)
428{
429 int n = 100;
430
431 while (n-- && !ath10k_pci_target_is_awake(ar))
432 msleep(10);
433
434 if (n < 0)
435 ath10k_warn("Unable to wake up target\n");
436}
437
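/* Reference-counted "force awake": the first caller asserts PCIE_SOC_WAKE
 * and then polls, with an increasing delay up to PCIE_WAKE_TIMEOUT, until
 * the target reports that it is awake. */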
438void ath10k_do_pci_wake(struct ath10k *ar)
439{
440 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
441 void __iomem *pci_addr = ar_pci->mem;
442 int tot_delay = 0;
443 int curr_delay = 5;
444
445 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
446 /* Force AWAKE */
447 iowrite32(PCIE_SOC_WAKE_V_MASK,
448 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
449 PCIE_SOC_WAKE_ADDRESS);
450 }
451 atomic_inc(&ar_pci->keep_awake_count);
452
453 if (ar_pci->verified_awake)
454 return;
455
456 for (;;) {
457 if (ath10k_pci_target_is_awake(ar)) {
458 ar_pci->verified_awake = true;
459 break;
460 }
461
462 if (tot_delay > PCIE_WAKE_TIMEOUT) {
463 ath10k_warn("target takes too long to wake up (awake count %d)\n",
464 atomic_read(&ar_pci->keep_awake_count));
465 break;
466 }
467
468 udelay(curr_delay);
469 tot_delay += curr_delay;
470
471 if (curr_delay < 50)
472 curr_delay += 5;
473 }
474}
475
476void ath10k_do_pci_sleep(struct ath10k *ar)
477{
478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
479 void __iomem *pci_addr = ar_pci->mem;
480
481 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
482 /* Allow sleep */
483 ar_pci->verified_awake = false;
484 iowrite32(PCIE_SOC_WAKE_RESET,
485 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
486 PCIE_SOC_WAKE_ADDRESS);
487 }
488}
489
490/*
491 * FIXME: Handle OOM properly.
492 */
493static inline
494struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
495{
496 struct ath10k_pci_compl *compl = NULL;
497
498 spin_lock_bh(&pipe_info->pipe_lock);
499 if (list_empty(&pipe_info->compl_free)) {
500 ath10k_warn("Completion buffers are full\n");
501 goto exit;
502 }
503 compl = list_first_entry(&pipe_info->compl_free,
504 struct ath10k_pci_compl, list);
505 list_del(&compl->list);
506exit:
507 spin_unlock_bh(&pipe_info->pipe_lock);
508 return compl;
509}
510
511/* Called by lower (CE) layer when a send to Target completes. */
512static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
513 void *transfer_context,
514 u32 ce_data,
515 unsigned int nbytes,
516 unsigned int transfer_id)
517{
518 struct ath10k *ar = ce_state->ar;
519 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
520 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
521 struct ath10k_pci_compl *compl;
522 bool process = false;
523
524 do {
525 /*
526 * For the send completion of an item in sendlist, just
527 * increment num_sends_allowed. The upper layer callback will
528 * be triggered when last fragment is done with send.
529 */
530 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
531 spin_lock_bh(&pipe_info->pipe_lock);
532 pipe_info->num_sends_allowed++;
533 spin_unlock_bh(&pipe_info->pipe_lock);
534 continue;
535 }
536
537 compl = get_free_compl(pipe_info);
538 if (!compl)
539 break;
540
541 compl->send_or_recv = HIF_CE_COMPLETE_SEND;
542 compl->ce_state = ce_state;
543 compl->pipe_info = pipe_info;
544 compl->transfer_context = transfer_context;
545 compl->nbytes = nbytes;
546 compl->transfer_id = transfer_id;
547 compl->flags = 0;
548
549 /*
550 * Add the completion to the processing queue.
551 */
552 spin_lock_bh(&ar_pci->compl_lock);
553 list_add_tail(&compl->list, &ar_pci->compl_process);
554 spin_unlock_bh(&ar_pci->compl_lock);
555
556 process = true;
557 } while (ath10k_ce_completed_send_next(ce_state,
558 &transfer_context,
559 &ce_data, &nbytes,
560 &transfer_id) == 0);
561
562 /*
563 * If only some of the items within a sendlist have completed,
564 * don't invoke completion processing until the entire sendlist
565 * has been sent.
566 */
567 if (!process)
568 return;
569
570 ath10k_pci_process_ce(ar);
571}
572
573/* Called by lower (CE) layer when data is received from the Target. */
574static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
575 void *transfer_context, u32 ce_data,
576 unsigned int nbytes,
577 unsigned int transfer_id,
578 unsigned int flags)
579{
580 struct ath10k *ar = ce_state->ar;
581 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
582 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
583 struct ath10k_pci_compl *compl;
584 struct sk_buff *skb;
585
586 do {
587 compl = get_free_compl(pipe_info);
588 if (!compl)
589 break;
590
591 compl->send_or_recv = HIF_CE_COMPLETE_RECV;
592 compl->ce_state = ce_state;
593 compl->pipe_info = pipe_info;
594 compl->transfer_context = transfer_context;
595 compl->nbytes = nbytes;
596 compl->transfer_id = transfer_id;
597 compl->flags = flags;
598
599 skb = transfer_context;
600 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
601 skb->len + skb_tailroom(skb),
602 DMA_FROM_DEVICE);
603 /*
604 * Add the completion to the processing queue.
605 */
606 spin_lock_bh(&ar_pci->compl_lock);
607 list_add_tail(&compl->list, &ar_pci->compl_process);
608 spin_unlock_bh(&ar_pci->compl_lock);
609
610 } while (ath10k_ce_completed_recv_next(ce_state,
611 &transfer_context,
612 &ce_data, &nbytes,
613 &transfer_id,
614 &flags) == 0);
615
616 ath10k_pci_process_ce(ar);
617}
618
619/* Send the first nbytes bytes of the buffer */
620static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
621 unsigned int transfer_id,
622 unsigned int bytes, struct sk_buff *nbuf)
623{
624 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
625 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
626 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
627 struct ce_state *ce_hdl = pipe_info->ce_hdl;
628 struct ce_sendlist sendlist;
629 unsigned int len;
630 u32 flags = 0;
631 int ret;
632
633 memset(&sendlist, 0, sizeof(struct ce_sendlist));
634
635 len = min(bytes, nbuf->len);
636 bytes -= len;
637
638 if (len & 3)
639 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
640
641 ath10k_dbg(ATH10K_DBG_PCI,
642 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
643 nbuf->data, (unsigned long long) skb_cb->paddr,
644 nbuf->len, len);
645 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
646 "ath10k tx: data: ",
647 nbuf->data, nbuf->len);
648
649 ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
650
651 /* Make sure we have resources to handle this request */
652 spin_lock_bh(&pipe_info->pipe_lock);
653 if (!pipe_info->num_sends_allowed) {
654 ath10k_warn("Pipe: %d is full\n", pipe_id);
655 spin_unlock_bh(&pipe_info->pipe_lock);
656 return -ENOSR;
657 }
658 pipe_info->num_sends_allowed--;
659 spin_unlock_bh(&pipe_info->pipe_lock);
660
661 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
662 if (ret)
663 ath10k_warn("CE send failed: %p\n", nbuf);
664
665 return ret;
666}
667
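/* Number of send slots currently available on the given pipe. */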
668static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
669{
670 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
671 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
672 int ret;
673
674 spin_lock_bh(&pipe_info->pipe_lock);
675 ret = pipe_info->num_sends_allowed;
676 spin_unlock_bh(&pipe_info->pipe_lock);
677
678 return ret;
679}
680
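/* Dump firmware crash state: read hi_failure_state to locate the register
 * dump area, then fetch and print REG_DUMP_COUNT_QCA988X words through the
 * diagnostic window. */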
681static void ath10k_pci_hif_dump_area(struct ath10k *ar)
682{
683 u32 reg_dump_area = 0;
684 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
685 u32 host_addr;
686 int ret;
687 u32 i;
688
689 ath10k_err("firmware crashed!\n");
690 ath10k_err("hardware name %s version 0x%x\n",
691 ar->hw_params.name, ar->target_version);
692 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
693 ar->fw_version_minor, ar->fw_version_release,
694 ar->fw_version_build);
695
696 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
697 if (ath10k_pci_diag_read_mem(ar, host_addr,
698 &reg_dump_area, sizeof(u32)) != 0) {
699 ath10k_warn("could not read hi_failure_state\n");
700 return;
701 }
702
703 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
704
705 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
706 &reg_dump_values[0],
707 REG_DUMP_COUNT_QCA988X * sizeof(u32));
708 if (ret != 0) {
709 ath10k_err("could not dump FW Dump Area\n");
710 return;
711 }
712
713 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
714
715 ath10k_err("target Register Dump\n");
716 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
717 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
718 i,
719 reg_dump_values[i],
720 reg_dump_values[i + 1],
721 reg_dump_values[i + 2],
722 reg_dump_values[i + 3]);
723}
724
725static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
726 int force)
727{
728 if (!force) {
729 int resources;
730 /*
731 * Decide whether to actually poll for completions, or just
732 * wait for a later chance.
733 * If there seem to be plenty of resources left, then just wait
734 * since checking involves reading a CE register, which is a
735 * relatively expensive operation.
736 */
737 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
738
739 /*
740 * If at least 50% of the total resources are still available,
741 * don't bother checking again yet.
742 */
743 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
744 return;
745 }
746 ath10k_ce_per_engine_service(ar, pipe);
747}
748
749static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
750 struct ath10k_hif_cb *callbacks)
751{
752 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
753
754 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
755
756 memcpy(&ar_pci->msg_callbacks_current, callbacks,
757 sizeof(ar_pci->msg_callbacks_current));
758}
759
760static int ath10k_pci_start_ce(struct ath10k *ar)
761{
762 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
763 struct ce_state *ce_diag = ar_pci->ce_diag;
764 const struct ce_attr *attr;
765 struct hif_ce_pipe_info *pipe_info;
766 struct ath10k_pci_compl *compl;
767 int i, pipe_num, completions, disable_interrupts;
768
769 spin_lock_init(&ar_pci->compl_lock);
770 INIT_LIST_HEAD(&ar_pci->compl_process);
771
772 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
773 pipe_info = &ar_pci->pipe_info[pipe_num];
774
775 spin_lock_init(&pipe_info->pipe_lock);
776 INIT_LIST_HEAD(&pipe_info->compl_free);
777
778 /* Handle Diagnostic CE specially */
779 if (pipe_info->ce_hdl == ce_diag)
780 continue;
781
782 attr = &host_ce_config_wlan[pipe_num];
783 completions = 0;
784
785 if (attr->src_nentries) {
786 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
787 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
788 ath10k_pci_ce_send_done,
789 disable_interrupts);
790 completions += attr->src_nentries;
791 pipe_info->num_sends_allowed = attr->src_nentries - 1;
792 }
793
794 if (attr->dest_nentries) {
795 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
796 ath10k_pci_ce_recv_data);
797 completions += attr->dest_nentries;
798 }
799
800 if (completions == 0)
801 continue;
802
803 for (i = 0; i < completions; i++) {
804 compl = kmalloc(sizeof(struct ath10k_pci_compl),
805 GFP_KERNEL);
806 if (!compl) {
807 ath10k_warn("No memory for completion state\n");
808 ath10k_pci_stop_ce(ar);
809 return -ENOMEM;
810 }
811
812 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
813 list_add_tail(&compl->list, &pipe_info->compl_free);
814 }
815 }
816
817 return 0;
818}
819
820static void ath10k_pci_stop_ce(struct ath10k *ar)
821{
822 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
823 struct ath10k_pci_compl *compl;
824 struct sk_buff *skb;
825 int i;
826
827 ath10k_ce_disable_interrupts(ar);
828
829 /* Cancel the pending tasklet */
830 tasklet_kill(&ar_pci->intr_tq);
831
832 for (i = 0; i < CE_COUNT; i++)
833 tasklet_kill(&ar_pci->pipe_info[i].intr);
834
835 /* Mark pending completions as aborted, so that upper layers free up
836 * their associated resources */
837 spin_lock_bh(&ar_pci->compl_lock);
838 list_for_each_entry(compl, &ar_pci->compl_process, list) {
839 skb = (struct sk_buff *)compl->transfer_context;
840 ATH10K_SKB_CB(skb)->is_aborted = true;
841 }
842 spin_unlock_bh(&ar_pci->compl_lock);
843}
844
845static void ath10k_pci_cleanup_ce(struct ath10k *ar)
846{
847 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
848 struct ath10k_pci_compl *compl, *tmp;
849 struct hif_ce_pipe_info *pipe_info;
850 struct sk_buff *netbuf;
851 int pipe_num;
852
853 /* Free pending completions. */
854 spin_lock_bh(&ar_pci->compl_lock);
855 if (!list_empty(&ar_pci->compl_process))
856 ath10k_warn("pending completions still present! possible memory leaks.\n");
857
858 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
859 list_del(&compl->list);
860 netbuf = (struct sk_buff *)compl->transfer_context;
861 dev_kfree_skb_any(netbuf);
862 kfree(compl);
863 }
864 spin_unlock_bh(&ar_pci->compl_lock);
865
866 /* Free unused completions for each pipe. */
867 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
868 pipe_info = &ar_pci->pipe_info[pipe_num];
869
870 spin_lock_bh(&pipe_info->pipe_lock);
871 list_for_each_entry_safe(compl, tmp,
872 &pipe_info->compl_free, list) {
873 list_del(&compl->list);
874 kfree(compl);
875 }
876 spin_unlock_bh(&pipe_info->pipe_lock);
877 }
878}
879
880static void ath10k_pci_process_ce(struct ath10k *ar)
881{
882 struct ath10k_pci *ar_pci = ar->hif.priv;
883 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
884 struct ath10k_pci_compl *compl;
885 struct sk_buff *skb;
886 unsigned int nbytes;
887 int ret, send_done = 0;
888
889 /* Upper layers aren't ready to handle tx/rx completions in parallel so
890 * we must serialize all completion processing. */
891
892 spin_lock_bh(&ar_pci->compl_lock);
893 if (ar_pci->compl_processing) {
894 spin_unlock_bh(&ar_pci->compl_lock);
895 return;
896 }
897 ar_pci->compl_processing = true;
898 spin_unlock_bh(&ar_pci->compl_lock);
899
900 for (;;) {
901 spin_lock_bh(&ar_pci->compl_lock);
902 if (list_empty(&ar_pci->compl_process)) {
903 spin_unlock_bh(&ar_pci->compl_lock);
904 break;
905 }
906 compl = list_first_entry(&ar_pci->compl_process,
907 struct ath10k_pci_compl, list);
908 list_del(&compl->list);
909 spin_unlock_bh(&ar_pci->compl_lock);
910
911 if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
912 cb->tx_completion(ar,
913 compl->transfer_context,
914 compl->transfer_id);
915 send_done = 1;
916 } else {
917 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
918 if (ret) {
919 ath10k_warn("Unable to post recv buffer for pipe: %d\n",
920 compl->pipe_info->pipe_num);
921 break;
922 }
923
924 skb = (struct sk_buff *)compl->transfer_context;
925 nbytes = compl->nbytes;
926
927 ath10k_dbg(ATH10K_DBG_PCI,
928 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
929 skb, nbytes);
930 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
931 "ath10k rx: ", skb->data, nbytes);
932
933 if (skb->len + skb_tailroom(skb) >= nbytes) {
934 skb_trim(skb, 0);
935 skb_put(skb, nbytes);
936 cb->rx_completion(ar, skb,
937 compl->pipe_info->pipe_num);
938 } else {
939 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
940 nbytes,
941 skb->len + skb_tailroom(skb));
942 }
943 }
944
945 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
946
947 /*
948 * Add completion back to the pipe's free list.
949 */
950 spin_lock_bh(&compl->pipe_info->pipe_lock);
951 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
952 compl->pipe_info->num_sends_allowed += send_done;
953 spin_unlock_bh(&compl->pipe_info->pipe_lock);
954 }
955
956 spin_lock_bh(&ar_pci->compl_lock);
957 ar_pci->compl_processing = false;
958 spin_unlock_bh(&ar_pci->compl_lock);
959}
960
961/* TODO - temporary mapping while we have too few CE's */
962static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
963 u16 service_id, u8 *ul_pipe,
964 u8 *dl_pipe, int *ul_is_polled,
965 int *dl_is_polled)
966{
967 int ret = 0;
968
969 /* polling for received messages not supported */
970 *dl_is_polled = 0;
971
972 switch (service_id) {
973 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
974 /*
975 * Host->target HTT gets its own pipe, so it can be polled
976 * while other pipes are interrupt driven.
977 */
978 *ul_pipe = 4;
979 /*
980 * Use the same target->host pipe for HTC ctrl, HTC raw
981 * streams, and HTT.
982 */
983 *dl_pipe = 1;
984 break;
985
986 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
987 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
988 /*
989 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
990 * HTC_CTRL_RSVD_SVC could share the same pipe as the
991 * WMI services. So, if another CE is needed, change
992 * this to *ul_pipe = 3, which frees up CE 0.
993 */
994 /* *ul_pipe = 3; */
995 *ul_pipe = 0;
996 *dl_pipe = 1;
997 break;
998
999 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1000 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1001 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1002 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1003
1004 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1005 *ul_pipe = 3;
1006 *dl_pipe = 2;
1007 break;
1008
1009 /* pipe 5 unused */
1010 /* pipe 6 reserved */
1011 /* pipe 7 reserved */
1012
1013 default:
1014 ret = -1;
1015 break;
1016 }
1017 *ul_is_polled =
1018 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1019
1020 return ret;
1021}
1022
1023static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1024 u8 *ul_pipe, u8 *dl_pipe)
1025{
1026 int ul_is_polled, dl_is_polled;
1027
1028 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1029 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1030 ul_pipe,
1031 dl_pipe,
1032 &ul_is_polled,
1033 &dl_is_polled);
1034}
1035
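/* Allocate, DMA-map and enqueue 'num' receive buffers of pipe_info->buf_sz
 * bytes on the pipe's copy engine. */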
1036static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
1037 int num)
1038{
1039 struct ath10k *ar = pipe_info->hif_ce_state;
1040 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1041 struct ce_state *ce_state = pipe_info->ce_hdl;
1042 struct sk_buff *skb;
1043 dma_addr_t ce_data;
1044 int i, ret = 0;
1045
1046 if (pipe_info->buf_sz == 0)
1047 return 0;
1048
1049 for (i = 0; i < num; i++) {
1050 skb = dev_alloc_skb(pipe_info->buf_sz);
1051 if (!skb) {
1052 ath10k_warn("could not allocate skbuff for pipe %d\n",
1053 pipe_info->pipe_num);
1054 ret = -ENOMEM;
1055 goto err;
1056 }
1057
1058 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1059
1060 ce_data = dma_map_single(ar->dev, skb->data,
1061 skb->len + skb_tailroom(skb),
1062 DMA_FROM_DEVICE);
1063
1064 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1065 ath10k_warn("could not dma map skbuff\n");
1066 dev_kfree_skb_any(skb);
1067 ret = -EIO;
1068 goto err;
1069 }
1070
1071 ATH10K_SKB_CB(skb)->paddr = ce_data;
1072
1073 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1074 pipe_info->buf_sz,
1075 PCI_DMA_FROMDEVICE);
1076
1077 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1078 ce_data);
1079 if (ret) {
1080 ath10k_warn("could not enqueue to pipe %d (%d)\n",
1081 pipe_info->pipe_num, ret);
1082 goto err;
1083 }
1084 }
1085
1086 return ret;
1087
1088err:
1089 ath10k_pci_rx_pipe_cleanup(pipe_info);
1090 return ret;
1091}
1092
1093static int ath10k_pci_post_rx(struct ath10k *ar)
1094{
1095 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1096 struct hif_ce_pipe_info *pipe_info;
1097 const struct ce_attr *attr;
1098 int pipe_num, ret = 0;
1099
1100 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1101 pipe_info = &ar_pci->pipe_info[pipe_num];
1102 attr = &host_ce_config_wlan[pipe_num];
1103
1104 if (attr->dest_nentries == 0)
1105 continue;
1106
1107 ret = ath10k_pci_post_rx_pipe(pipe_info,
1108 attr->dest_nentries - 1);
1109 if (ret) {
1110 ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1111 pipe_num);
1112
1113 for (; pipe_num >= 0; pipe_num--) {
1114 pipe_info = &ar_pci->pipe_info[pipe_num];
1115 ath10k_pci_rx_pipe_cleanup(pipe_info);
1116 }
1117 return ret;
1118 }
1119 }
1120
1121 return 0;
1122}
1123
1124static int ath10k_pci_hif_start(struct ath10k *ar)
1125{
1126 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1127 int ret;
1128
1129 ret = ath10k_pci_start_ce(ar);
1130 if (ret) {
1131 ath10k_warn("could not start CE (%d)\n", ret);
1132 return ret;
1133 }
1134
1135 /* Post buffers once to start things off. */
1136 ret = ath10k_pci_post_rx(ar);
1137 if (ret) {
1138 ath10k_warn("could not post rx pipes (%d)\n", ret);
1139 return ret;
1140 }
1141
1142 ar_pci->started = 1;
1143 return 0;
1144}
1145
1146static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1147{
1148 struct ath10k *ar;
1149 struct ath10k_pci *ar_pci;
1150 struct ce_state *ce_hdl;
1151 u32 buf_sz;
1152 struct sk_buff *netbuf;
1153 u32 ce_data;
1154
1155 buf_sz = pipe_info->buf_sz;
1156
1157 /* Unused Copy Engine */
1158 if (buf_sz == 0)
1159 return;
1160
1161 ar = pipe_info->hif_ce_state;
1162 ar_pci = ath10k_pci_priv(ar);
1163
1164 if (!ar_pci->started)
1165 return;
1166
1167 ce_hdl = pipe_info->ce_hdl;
1168
1169 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1170 &ce_data) == 0) {
1171 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1172 netbuf->len + skb_tailroom(netbuf),
1173 DMA_FROM_DEVICE);
1174 dev_kfree_skb_any(netbuf);
1175 }
1176}
1177
1178static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1179{
1180 struct ath10k *ar;
1181 struct ath10k_pci *ar_pci;
1182 struct ce_state *ce_hdl;
1183 struct sk_buff *netbuf;
1184 u32 ce_data;
1185 unsigned int nbytes;
1186 unsigned int id;
1187 u32 buf_sz;
1188
1189 buf_sz = pipe_info->buf_sz;
1190
1191 /* Unused Copy Engine */
1192 if (buf_sz == 0)
1193 return;
1194
1195 ar = pipe_info->hif_ce_state;
1196 ar_pci = ath10k_pci_priv(ar);
1197
1198 if (!ar_pci->started)
1199 return;
1200
1201 ce_hdl = pipe_info->ce_hdl;
1202
1203 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1204 &ce_data, &nbytes, &id) == 0) {
1205 if (netbuf != CE_SENDLIST_ITEM_CTXT)
1206 /*
1207 * Indicate the completion to the higher layer to free
1208 * the buffer
1209 */
1210 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1211 ar_pci->msg_callbacks_current.tx_completion(ar,
1212 netbuf,
1213 id);
1214 }
1215}
1216
1217/*
1218 * Cleanup residual buffers for device shutdown:
1219 * buffers that were enqueued for receive
1220 * buffers that were to be sent
1221 * Note: Buffers that had completed but which were
1222 * not yet processed are on a completion queue. They
1223 * are handled when the completion thread shuts down.
1224 */
1225static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1226{
1227 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1228 int pipe_num;
1229
1230 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1231 struct hif_ce_pipe_info *pipe_info;
1232
1233 pipe_info = &ar_pci->pipe_info[pipe_num];
1234 ath10k_pci_rx_pipe_cleanup(pipe_info);
1235 ath10k_pci_tx_pipe_cleanup(pipe_info);
1236 }
1237}
1238
1239static void ath10k_pci_ce_deinit(struct ath10k *ar)
1240{
1241 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1242 struct hif_ce_pipe_info *pipe_info;
1243 int pipe_num;
1244
1245 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1246 pipe_info = &ar_pci->pipe_info[pipe_num];
1247 if (pipe_info->ce_hdl) {
1248 ath10k_ce_deinit(pipe_info->ce_hdl);
1249 pipe_info->ce_hdl = NULL;
1250 pipe_info->buf_sz = 0;
1251 }
1252 }
1253}
1254
1255static void ath10k_pci_hif_stop(struct ath10k *ar)
1256{
1257 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1258
1259 ath10k_pci_stop_ce(ar);
1260
1261 /* At this point, asynchronous threads are stopped, the target should
1262 * not DMA nor interrupt. We process the leftovers and then free
1263 * everything else up. */
1264
1265 ath10k_pci_process_ce(ar);
1266 ath10k_pci_cleanup_ce(ar);
1267 ath10k_pci_buffer_cleanup(ar);
1268}
1269
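/* Synchronous BMI request/response exchange: the request is copied into a
 * DMA-able bounce buffer, an optional response buffer is posted on the BMI
 * RX pipe, and the call waits (with a timeout) for completion. */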
1270static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1271 void *req, u32 req_len,
1272 void *resp, u32 *resp_len)
1273{
1274 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1275 struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1276 struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1277 dma_addr_t req_paddr = 0;
1278 dma_addr_t resp_paddr = 0;
1279 struct bmi_xfer xfer = {};
1280 void *treq, *tresp = NULL;
1281 int ret = 0;
1282
1283 if (resp && !resp_len)
1284 return -EINVAL;
1285
1286 if (resp && resp_len && *resp_len == 0)
1287 return -EINVAL;
1288
1289 treq = kmemdup(req, req_len, GFP_KERNEL);
1290 if (!treq)
1291 return -ENOMEM;
1292
1293 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1294 ret = dma_mapping_error(ar->dev, req_paddr);
1295 if (ret)
1296 goto err_dma;
1297
1298 if (resp && resp_len) {
1299 tresp = kzalloc(*resp_len, GFP_KERNEL);
1300 if (!tresp) {
1301 ret = -ENOMEM;
1302 goto err_req;
1303 }
1304
1305 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1306 DMA_FROM_DEVICE);
1307 ret = dma_mapping_error(ar->dev, resp_paddr);
1308 if (ret)
1309 goto err_req;
1310
1311 xfer.wait_for_resp = true;
1312 xfer.resp_len = 0;
1313
1314 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1315 }
1316
1317 init_completion(&xfer.done);
1318
1319 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1320 if (ret)
1321 goto err_resp;
1322
1323 ret = wait_for_completion_timeout(&xfer.done,
1324 BMI_COMMUNICATION_TIMEOUT_HZ);
1325 if (ret <= 0) {
1326 u32 unused_buffer;
1327 unsigned int unused_nbytes;
1328 unsigned int unused_id;
1329
1330 ret = -ETIMEDOUT;
1331 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1332 &unused_nbytes, &unused_id);
1333 } else {
1334 /* non-zero means we did not time out */
1335 ret = 0;
1336 }
1337
1338err_resp:
1339 if (resp) {
1340 u32 unused_buffer;
1341
1342 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1343 dma_unmap_single(ar->dev, resp_paddr,
1344 *resp_len, DMA_FROM_DEVICE);
1345 }
1346err_req:
1347 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1348
1349 if (ret == 0 && resp_len) {
1350 *resp_len = min(*resp_len, xfer.resp_len);
1351 memcpy(resp, tresp, xfer.resp_len);
1352 }
1353err_dma:
1354 kfree(treq);
1355 kfree(tresp);
1356
1357 return ret;
1358}
1359
1360static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1361 void *transfer_context,
1362 u32 data,
1363 unsigned int nbytes,
1364 unsigned int transfer_id)
1365{
1366 struct bmi_xfer *xfer = transfer_context;
1367
1368 if (xfer->wait_for_resp)
1369 return;
1370
1371 complete(&xfer->done);
1372}
1373
1374static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1375 void *transfer_context,
1376 u32 data,
1377 unsigned int nbytes,
1378 unsigned int transfer_id,
1379 unsigned int flags)
1380{
1381 struct bmi_xfer *xfer = transfer_context;
1382
1383 if (!xfer->wait_for_resp) {
1384 ath10k_warn("unexpected: BMI data received; ignoring\n");
1385 return;
1386 }
1387
1388 xfer->resp_len = nbytes;
1389 complete(&xfer->done);
1390}
1391
1392/*
1393 * Map from service/endpoint to Copy Engine.
1394 * This table is derived from the CE_PCI TABLE, above.
1395 * It is passed to the Target at startup for use by firmware.
1396 */
1397static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1398 {
1399 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1400 PIPEDIR_OUT, /* out = UL = host -> target */
1401 3,
1402 },
1403 {
1404 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1405 PIPEDIR_IN, /* in = DL = target -> host */
1406 2,
1407 },
1408 {
1409 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1410 PIPEDIR_OUT, /* out = UL = host -> target */
1411 3,
1412 },
1413 {
1414 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1415 PIPEDIR_IN, /* in = DL = target -> host */
1416 2,
1417 },
1418 {
1419 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1420 PIPEDIR_OUT, /* out = UL = host -> target */
1421 3,
1422 },
1423 {
1424 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1425 PIPEDIR_IN, /* in = DL = target -> host */
1426 2,
1427 },
1428 {
1429 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1430 PIPEDIR_OUT, /* out = UL = host -> target */
1431 3,
1432 },
1433 {
1434 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1435 PIPEDIR_IN, /* in = DL = target -> host */
1436 2,
1437 },
1438 {
1439 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1440 PIPEDIR_OUT, /* out = UL = host -> target */
1441 3,
1442 },
1443 {
1444 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1445 PIPEDIR_IN, /* in = DL = target -> host */
1446 2,
1447 },
1448 {
1449 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1450 PIPEDIR_OUT, /* out = UL = host -> target */
1451 0, /* could be moved to 3 (share with WMI) */
1452 },
1453 {
1454 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1455 PIPEDIR_IN, /* in = DL = target -> host */
1456 1,
1457 },
1458 {
1459 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1460 PIPEDIR_OUT, /* out = UL = host -> target */
1461 0,
1462 },
1463 {
1464 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1465 PIPEDIR_IN, /* in = DL = target -> host */
1466 1,
1467 },
1468 {
1469 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1470 PIPEDIR_OUT, /* out = UL = host -> target */
1471 4,
1472 },
1473 {
1474 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1475 PIPEDIR_IN, /* in = DL = target -> host */
1476 1,
1477 },
1478
1479 /* (Additions here) */
1480
1481 { /* Must be last */
1482 0,
1483 0,
1484 0,
1485 },
1486};
1487
1488/*
1489 * Send an interrupt to the device to wake up the Target CPU
1490 * so it has an opportunity to notice any changed state.
1491 */
1492static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1493{
1494 int ret;
1495 u32 core_ctrl;
1496
1497 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1498 CORE_CTRL_ADDRESS,
1499 &core_ctrl);
1500 if (ret) {
1501 ath10k_warn("Unable to read core ctrl\n");
1502 return ret;
1503 }
1504
1505 /* A_INUM_FIRMWARE interrupt to Target CPU */
1506 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1507
1508 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1509 CORE_CTRL_ADDRESS,
1510 core_ctrl);
1511 if (ret)
1512 ath10k_warn("Unable to set interrupt mask\n");
1513
1514 return ret;
1515}
1516
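/* Download the target-side CE configuration and service-to-pipe map through
 * the diagnostic window, disable PCIe L1, configure early allocation and
 * tell the target to proceed with initialization. */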
1517static int ath10k_pci_init_config(struct ath10k *ar)
1518{
1519 u32 interconnect_targ_addr;
1520 u32 pcie_state_targ_addr = 0;
1521 u32 pipe_cfg_targ_addr = 0;
1522 u32 svc_to_pipe_map = 0;
1523 u32 pcie_config_flags = 0;
1524 u32 ealloc_value;
1525 u32 ealloc_targ_addr;
1526 u32 flag2_value;
1527 u32 flag2_targ_addr;
1528 int ret = 0;
1529
1530 /* Download to Target the CE Config and the service-to-CE map */
1531 interconnect_targ_addr =
1532 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1533
1534 /* Supply Target-side CE configuration */
1535 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1536 &pcie_state_targ_addr);
1537 if (ret != 0) {
1538 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1539 return ret;
1540 }
1541
1542 if (pcie_state_targ_addr == 0) {
1543 ret = -EIO;
1544 ath10k_err("Invalid pcie state addr\n");
1545 return ret;
1546 }
1547
1548 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1549 offsetof(struct pcie_state,
1550 pipe_cfg_addr),
1551 &pipe_cfg_targ_addr);
1552 if (ret != 0) {
1553 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1554 return ret;
1555 }
1556
1557 if (pipe_cfg_targ_addr == 0) {
1558 ret = -EIO;
1559 ath10k_err("Invalid pipe cfg addr\n");
1560 return ret;
1561 }
1562
1563 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1564 target_ce_config_wlan,
1565 sizeof(target_ce_config_wlan));
1566
1567 if (ret != 0) {
1568 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1569 return ret;
1570 }
1571
1572 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1573 offsetof(struct pcie_state,
1574 svc_to_pipe_map),
1575 &svc_to_pipe_map);
1576 if (ret != 0) {
1577 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1578 return ret;
1579 }
1580
1581 if (svc_to_pipe_map == 0) {
1582 ret = -EIO;
1583 ath10k_err("Invalid svc_to_pipe map\n");
1584 return ret;
1585 }
1586
1587 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1588 target_service_to_ce_map_wlan,
1589 sizeof(target_service_to_ce_map_wlan));
1590 if (ret != 0) {
1591 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1592 return ret;
1593 }
1594
1595 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1596 offsetof(struct pcie_state,
1597 config_flags),
1598 &pcie_config_flags);
1599 if (ret != 0) {
1600 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1601 return ret;
1602 }
1603
1604 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1605
1606 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1607 offsetof(struct pcie_state, config_flags),
1608 &pcie_config_flags,
1609 sizeof(pcie_config_flags));
1610 if (ret != 0) {
1611 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1612 return ret;
1613 }
1614
1615 /* configure early allocation */
1616 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1617
1618 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1619 if (ret != 0) {
1620 ath10k_err("Failed to get early alloc val: %d\n", ret);
1621 return ret;
1622 }
1623
1624 /* first bank is switched to IRAM */
1625 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1626 HI_EARLY_ALLOC_MAGIC_MASK);
1627 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1628 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1629
1630 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1631 if (ret != 0) {
1632 ath10k_err("Failed to set early alloc val: %d\n", ret);
1633 return ret;
1634 }
1635
1636 /* Tell Target to proceed with initialization */
1637 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1638
1639 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1640 if (ret != 0) {
1641 ath10k_err("Failed to get option val: %d\n", ret);
1642 return ret;
1643 }
1644
1645 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1646
1647 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1648 if (ret != 0) {
1649 ath10k_err("Failed to set option val: %d\n", ret);
1650 return ret;
1651 }
1652
1653 return 0;
1654}
1655
1656
1657
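/* Initialize every copy engine, reserve the last one as the diagnostic CE,
 * and register the BMI completion handlers (replaced by the generic
 * handlers once the BMI phase is over). */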
1658static int ath10k_pci_ce_init(struct ath10k *ar)
1659{
1660 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1661 struct hif_ce_pipe_info *pipe_info;
1662 const struct ce_attr *attr;
1663 int pipe_num;
1664
1665 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1666 pipe_info = &ar_pci->pipe_info[pipe_num];
1667 pipe_info->pipe_num = pipe_num;
1668 pipe_info->hif_ce_state = ar;
1669 attr = &host_ce_config_wlan[pipe_num];
1670
1671 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1672 if (pipe_info->ce_hdl == NULL) {
1673 ath10k_err("Unable to initialize CE for pipe: %d\n",
1674 pipe_num);
1675
1676 /* It is safe to call it here. It checks if ce_hdl is
1677 * valid for each pipe */
1678 ath10k_pci_ce_deinit(ar);
1679 return -1;
1680 }
1681
1682 if (pipe_num == ar_pci->ce_count - 1) {
1683 /*
1684 * Reserve the ultimate CE for
1685 * diagnostic Window support
1686 */
1687 ar_pci->ce_diag =
1688 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1689 continue;
1690 }
1691
1692 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1693 }
1694
1695 /*
1696 * Initially, establish CE completion handlers for use with BMI.
1697 * These are overwritten with generic handlers after we exit BMI phase.
1698 */
1699 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1700 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1701 ath10k_pci_bmi_send_done, 0);
1702
1703 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1704 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1705 ath10k_pci_bmi_recv_data);
1706
1707 return 0;
1708}
1709
1710static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1711{
1712 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1713 u32 fw_indicator_address, fw_indicator;
1714
1715 ath10k_pci_wake(ar);
1716
1717 fw_indicator_address = ar_pci->fw_indicator_address;
1718 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1719
1720 if (fw_indicator & FW_IND_EVENT_PENDING) {
1721 /* ACK: clear Target-side pending event */
1722 ath10k_pci_write32(ar, fw_indicator_address,
1723 fw_indicator & ~FW_IND_EVENT_PENDING);
1724
1725 if (ar_pci->started) {
1726 ath10k_pci_hif_dump_area(ar);
1727 } else {
1728 /*
1729 * Probable Target failure before we're prepared
1730 * to handle it. Generally unexpected.
1731 */
1732 ath10k_warn("early firmware event indicated\n");
1733 }
1734 }
1735
1736 ath10k_pci_sleep(ar);
1737}
1738
1739static int ath10k_pci_hif_power_up(struct ath10k *ar)
1740{
1741 int ret;
1742
1743 /*
1744 * Bring the target up cleanly.
1745 *
1746 * The target may be in an undefined state with an AUX-powered Target
1747 * and a Host in WoW mode. If the Host crashes, loses power, or is
1748 * restarted (without unloading the driver) then the Target is left
1749 * (aux) powered and running. On a subsequent driver load, the Target
1750 * is in an unexpected state. We try to catch that here in order to
1751 * reset the Target and retry the probe.
1752 */
1753 ath10k_pci_device_reset(ar);
1754
1755 ret = ath10k_pci_reset_target(ar);
1756 if (ret)
1757 goto err;
1758
1759 if (ath10k_target_ps) {
1760 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
1761 } else {
1762 /* Force AWAKE forever */
1763 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
1764 ath10k_do_pci_wake(ar);
1765 }
1766
1767 ret = ath10k_pci_ce_init(ar);
1768 if (ret)
1769 goto err_ps;
1770
1771 ret = ath10k_pci_init_config(ar);
1772 if (ret)
1773 goto err_ce;
1774
1775 ret = ath10k_pci_wake_target_cpu(ar);
1776 if (ret) {
1777 ath10k_err("could not wake up target CPU (%d)\n", ret);
1778 goto err_ce;
1779 }
1780
1781 return 0;
1782
1783err_ce:
1784 ath10k_pci_ce_deinit(ar);
1785err_ps:
1786 if (!ath10k_target_ps)
1787 ath10k_do_pci_sleep(ar);
1788err:
1789 return ret;
1790}
1791
1792static void ath10k_pci_hif_power_down(struct ath10k *ar)
1793{
1794 ath10k_pci_ce_deinit(ar);
1795 if (!ath10k_target_ps)
1796 ath10k_do_pci_sleep(ar);
1797}
1798
1799#ifdef CONFIG_PM
1800
1801#define ATH10K_PCI_PM_CONTROL 0x44
1802
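/* ATH10K_PCI_PM_CONTROL (0x44) is presumably the device's PCI power
 * management control/status register; writing 0x3 to its low bits below
 * requests the D3hot state. */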
1803static int ath10k_pci_hif_suspend(struct ath10k *ar)
1804{
1805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1806 struct pci_dev *pdev = ar_pci->pdev;
1807 u32 val;
1808
1809 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1810
1811 if ((val & 0x000000ff) != 0x3) {
1812 pci_save_state(pdev);
1813 pci_disable_device(pdev);
1814 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1815 (val & 0xffffff00) | 0x03);
1816 }
1817
1818 return 0;
1819}
1820
1821static int ath10k_pci_hif_resume(struct ath10k *ar)
1822{
1823 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1824 struct pci_dev *pdev = ar_pci->pdev;
1825 u32 val;
1826
1827 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1828
1829 if ((val & 0x000000ff) != 0) {
1830 pci_restore_state(pdev);
1831 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1832 val & 0xffffff00);
1833 /*
1834 * Suspend/Resume resets the PCI configuration space,
1835 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1836 * to keep PCI Tx retries from interfering with C3 CPU state
1837 */
1838 pci_read_config_dword(pdev, 0x40, &val);
1839
1840 if ((val & 0x0000ff00) != 0)
1841 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1842 }
1843
1844 return 0;
1845}
1846#endif
1847
1848static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1849 .send_head = ath10k_pci_hif_send_head,
1850 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1851 .start = ath10k_pci_hif_start,
1852 .stop = ath10k_pci_hif_stop,
1853 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1854 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1855 .send_complete_check = ath10k_pci_hif_send_complete_check,
1856 .set_callbacks = ath10k_pci_hif_set_callbacks,
1857 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
1858 .power_up = ath10k_pci_hif_power_up,
1859 .power_down = ath10k_pci_hif_power_down,
1860#ifdef CONFIG_PM
1861 .suspend = ath10k_pci_hif_suspend,
1862 .resume = ath10k_pci_hif_resume,
1863#endif
1864};
1865
1866static void ath10k_pci_ce_tasklet(unsigned long ptr)
1867{
1868 struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1869 struct ath10k_pci *ar_pci = pipe->ar_pci;
1870
1871 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1872}
1873
1874static void ath10k_msi_err_tasklet(unsigned long data)
1875{
1876 struct ath10k *ar = (struct ath10k *)data;
1877
1878 ath10k_pci_fw_interrupt_handler(ar);
1879}
1880
1881/*
1882 * Handler for a per-engine interrupt on a PARTICULAR CE.
1883 * This is used in cases where each CE has a private MSI interrupt.
1884 */
1885static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1886{
1887 struct ath10k *ar = arg;
1888 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1889 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1890
1891 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1892 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1893 return IRQ_HANDLED;
1894 }
1895
1896 /*
1897 * NOTE: We are able to derive ce_id from irq because we
1898 * use a one-to-one mapping for CE's 0..5.
1899 * CE's 6 & 7 do not use interrupts at all.
1900 *
1901 * This mapping must be kept in sync with the mapping
1902 * used by firmware.
1903 */
1904 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1905 return IRQ_HANDLED;
1906}
1907
1908static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1909{
1910 struct ath10k *ar = arg;
1911 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1912
1913 tasklet_schedule(&ar_pci->msi_fw_err);
1914 return IRQ_HANDLED;
1915}
1916
1917/*
1918 * Top-level interrupt handler for all PCI interrupts from a Target.
1919 * When a block of MSI interrupts is allocated, this top-level handler
1920 * is not used; instead, we directly call the correct sub-handler.
1921 */
1922static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1923{
1924 struct ath10k *ar = arg;
1925 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1926
1927 if (ar_pci->num_msi_intrs == 0) {
1928 /*
1929 * IMPORTANT: INTR_CLR register has to be set after
1930 * INTR_ENABLE is set to 0, otherwise the interrupt
1931 * cannot really be cleared.
1932 */
1933 iowrite32(0, ar_pci->mem +
1934 (SOC_CORE_BASE_ADDRESS |
1935 PCIE_INTR_ENABLE_ADDRESS));
1936 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1937 PCIE_INTR_CE_MASK_ALL,
1938 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1939 PCIE_INTR_CLR_ADDRESS));
1940 /*
1941 * IMPORTANT: this extra read transaction is required to
1942 * flush the posted write buffer.
1943 */
1944 (void) ioread32(ar_pci->mem +
1945 (SOC_CORE_BASE_ADDRESS |
1946 PCIE_INTR_ENABLE_ADDRESS));
1947 }
1948
1949 tasklet_schedule(&ar_pci->intr_tq);
1950
1951 return IRQ_HANDLED;
1952}
1953
1954static void ath10k_pci_tasklet(unsigned long data)
1955{
1956 struct ath10k *ar = (struct ath10k *)data;
1957 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1958
1959 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1960 ath10k_ce_per_engine_service_any(ar);
1961
1962 if (ar_pci->num_msi_intrs == 0) {
1963 /* Enable Legacy PCI line interrupts */
1964 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1965 PCIE_INTR_CE_MASK_ALL,
1966 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1967 PCIE_INTR_ENABLE_ADDRESS));
1968 /*
1969 * IMPORTANT: this extra read transaction is required to
1970 * flush the posted write buffer
1971 */
1972 (void) ioread32(ar_pci->mem +
1973 (SOC_CORE_BASE_ADDRESS |
1974 PCIE_INTR_ENABLE_ADDRESS));
1975 }
1976}
1977
1978static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1979{
1980 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1981 int ret;
1982 int i;
1983
1984 ret = pci_enable_msi_block(ar_pci->pdev, num);
1985 if (ret)
1986 return ret;
1987
1988 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1989 ath10k_pci_msi_fw_handler,
1990 IRQF_SHARED, "ath10k_pci", ar);
1991 if (ret)
1992 return ret;
1993
1994 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1995 ret = request_irq(ar_pci->pdev->irq + i,
1996 ath10k_pci_per_engine_handler,
1997 IRQF_SHARED, "ath10k_pci", ar);
1998 if (ret) {
1999 ath10k_warn("request_irq(%d) failed %d\n",
2000 ar_pci->pdev->irq + i, ret);
2001
2002 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2003 free_irq(ar_pci->pdev->irq + i, ar);
2004
2005 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2006 pci_disable_msi(ar_pci->pdev);
2007 return ret;
2008 }
2009 }
2010
2011 ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2012 return 0;
2013}
2014
2015static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2016{
2017 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2018 int ret;
2019
2020 ret = pci_enable_msi(ar_pci->pdev);
2021 if (ret < 0)
2022 return ret;
2023
2024 ret = request_irq(ar_pci->pdev->irq,
2025 ath10k_pci_interrupt_handler,
2026 IRQF_SHARED, "ath10k_pci", ar);
2027 if (ret < 0) {
2028 pci_disable_msi(ar_pci->pdev);
2029 return ret;
2030 }
2031
2032 ath10k_info("MSI interrupt handling\n");
2033 return 0;
2034}
2035
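/*
 * Last resort: legacy (shared INTx) interrupts. The target is woken up
 * before the interrupt sources are unmasked and put back to sleep once
 * the enable write has completed.
 */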
2036static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2037{
2038 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2039 int ret;
2040
2041 ret = request_irq(ar_pci->pdev->irq,
2042 ath10k_pci_interrupt_handler,
2043 IRQF_SHARED, "ath10k_pci", ar);
2044 if (ret < 0)
2045 return ret;
2046
2047 /*
2048 * Make sure to wake the Target before enabling Legacy
2049 * Interrupt.
2050 */
2051 iowrite32(PCIE_SOC_WAKE_V_MASK,
2052 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2053 PCIE_SOC_WAKE_ADDRESS);
2054
2055 ath10k_pci_wait(ar);
2056
2057 /*
2058 * A potential race occurs here: The CORE_BASE write
2059 * depends on target correctly decoding AXI address but
2060 * host won't know when target writes BAR to CORE_CTRL.
2061 * This write might get lost if target has NOT written BAR.
 2062		 * For now, work around the race by repeating the write
 2063		 * in the synchronization check below.
2064 */
2065 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2066 PCIE_INTR_CE_MASK_ALL,
2067 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2068 PCIE_INTR_ENABLE_ADDRESS));
2069 iowrite32(PCIE_SOC_WAKE_RESET,
2070 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2071 PCIE_SOC_WAKE_ADDRESS);
2072
2073 ath10k_info("legacy interrupt handling\n");
2074 return 0;
2075}
2076
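/*
 * Interrupt setup entry point: initialize the per-CE and firmware-error
 * tasklets, then try a block of MSI vectors, a single MSI vector and
 * finally legacy interrupts, in that order. num_msi_intrs records how
 * many vectors were obtained (0 means legacy).
 */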
2077static int ath10k_pci_start_intr(struct ath10k *ar)
2078{
2079 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2080 int num = MSI_NUM_REQUEST;
2081 int ret;
2082 int i;
2083
2084 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2085 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2086 (unsigned long) ar);
2087
2088 for (i = 0; i < CE_COUNT; i++) {
2089 ar_pci->pipe_info[i].ar_pci = ar_pci;
2090 tasklet_init(&ar_pci->pipe_info[i].intr,
2091 ath10k_pci_ce_tasklet,
2092 (unsigned long)&ar_pci->pipe_info[i]);
2093 }
2094
2095 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2096 num = 1;
2097
2098 if (num > 1) {
2099 ret = ath10k_pci_start_intr_msix(ar, num);
2100 if (ret == 0)
2101 goto exit;
2102
2103 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2104 num = 1;
2105 }
2106
2107 if (num == 1) {
2108 ret = ath10k_pci_start_intr_msi(ar);
2109 if (ret == 0)
2110 goto exit;
2111
2112 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2113 ret);
2114 num = 0;
2115 }
2116
2117 ret = ath10k_pci_start_intr_legacy(ar);
2118
2119exit:
2120 ar_pci->num_msi_intrs = num;
2121 ar_pci->ce_count = CE_COUNT;
2122 return ret;
2123}
2124
2125static void ath10k_pci_stop_intr(struct ath10k *ar)
2126{
2127 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2128 int i;
2129
 2130	/* There's at least one interrupt regardless of whether it's legacy INTR,
 2131	 * MSI or MSI-X */
2132 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2133 free_irq(ar_pci->pdev->irq + i, ar);
2134
2135 if (ar_pci->num_msi_intrs > 0)
2136 pci_disable_msi(ar_pci->pdev);
2137}
2138
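/*
 * Poll the firmware indicator register until the target reports
 * FW_IND_INITIALIZED, waiting up to roughly 3 seconds. In legacy
 * interrupt mode the interrupt enable write is repeated on every
 * iteration to work around the CORE_BASE race described above.
 */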
2139static int ath10k_pci_reset_target(struct ath10k *ar)
2140{
2141 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2142 int wait_limit = 300; /* 3 sec */
2143
2144 /* Wait for Target to finish initialization before we proceed. */
2145 iowrite32(PCIE_SOC_WAKE_V_MASK,
2146 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2147 PCIE_SOC_WAKE_ADDRESS);
2148
2149 ath10k_pci_wait(ar);
2150
2151 while (wait_limit-- &&
2152 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2153 FW_IND_INITIALIZED)) {
2154 if (ar_pci->num_msi_intrs == 0)
2155 /* Fix potential race by repeating CORE_BASE writes */
2156 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2157 PCIE_INTR_CE_MASK_ALL,
2158 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2159 PCIE_INTR_ENABLE_ADDRESS));
2160 mdelay(10);
2161 }
2162
2163 if (wait_limit < 0) {
2164 ath10k_err("Target stalled\n");
2165 iowrite32(PCIE_SOC_WAKE_RESET,
2166 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2167 PCIE_SOC_WAKE_ADDRESS);
2168 return -EIO;
2169 }
2170
2171 iowrite32(PCIE_SOC_WAKE_RESET,
2172 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2173 PCIE_SOC_WAKE_ADDRESS);
2174
2175 return 0;
2176}
2177
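/*
 * Cold-reset the whole SoC (including the PCIe core) by toggling the
 * SOC_GLOBAL_RESET bit and polling RTC_STATE until the chip has entered
 * and then left the cold-reset state.
 */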
Michal Kazior7a5fe3f2013-07-05 16:15:11 +03002178static void ath10k_pci_device_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002179{
Michal Kazior7a5fe3f2013-07-05 16:15:11 +03002180 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002181 void __iomem *mem = ar_pci->mem;
2182 int i;
2183 u32 val;
2184
2185 if (!SOC_GLOBAL_RESET_ADDRESS)
2186 return;
2187
2188 if (!mem)
2189 return;
2190
2191 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2192 PCIE_SOC_WAKE_V_MASK);
2193 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2194 if (ath10k_pci_target_is_awake(ar))
2195 break;
2196 msleep(1);
2197 }
2198
2199 /* Put Target, including PCIe, into RESET. */
2200 val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2201 val |= 1;
2202 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2203
2204 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2205 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2206 RTC_STATE_COLD_RESET_MASK)
2207 break;
2208 msleep(1);
2209 }
2210
2211 /* Pull Target, including PCIe, out of RESET. */
2212 val &= ~1;
2213 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2214
2215 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2216 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2217 RTC_STATE_COLD_RESET_MASK))
2218 break;
2219 msleep(1);
2220 }
2221
2222 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2223}
2224
2225static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2226{
2227 int i;
2228
2229 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2230 if (!test_bit(i, ar_pci->features))
2231 continue;
2232
2233 switch (i) {
2234 case ATH10K_PCI_FEATURE_MSI_X:
2235 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2236 break;
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002237 case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002238 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2239 break;
2240 }
2241 }
2242}
2243
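/*
 * PCI probe: allocate the per-device state, detect the hardware revision
 * from the PCI device ID, set up BAR/DMA/MMIO access, start interrupt
 * handling and finally register with the ath10k core.
 */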
2244static int ath10k_pci_probe(struct pci_dev *pdev,
2245 const struct pci_device_id *pci_dev)
2246{
2247 void __iomem *mem;
2248 int ret = 0;
2249 struct ath10k *ar;
2250 struct ath10k_pci *ar_pci;
2251 u32 lcr_val;
2252
2253 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2254
2255 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2256 if (ar_pci == NULL)
2257 return -ENOMEM;
2258
2259 ar_pci->pdev = pdev;
2260 ar_pci->dev = &pdev->dev;
2261
2262 switch (pci_dev->device) {
2263 case QCA988X_1_0_DEVICE_ID:
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002264 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002265 break;
2266 case QCA988X_2_0_DEVICE_ID:
2267 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2268 break;
2269 default:
2270 ret = -ENODEV;
 2271		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2272 goto err_ar_pci;
2273 }
2274
2275 ath10k_pci_dump_features(ar_pci);
2276
Michal Kazior3a0861f2013-07-05 16:15:06 +03002277 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002278 if (!ar) {
2279 ath10k_err("ath10k_core_create failed!\n");
2280 ret = -EINVAL;
2281 goto err_ar_pci;
2282 }
2283
2284 /* Enable QCA988X_1.0 HW workarounds */
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002285 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
Kalle Valo5e3dd152013-06-12 20:52:10 +03002286 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2287
2288 ar_pci->ar = ar;
2289 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2290 atomic_set(&ar_pci->keep_awake_count, 0);
2291
2292 pci_set_drvdata(pdev, ar);
2293
2294 /*
 2295	 * The Target may have been reset or power cycled without the Host's
 2296	 * knowledge, and its Config Space may no longer reflect the PCI
 2297	 * address space that was assigned earlier by the PCI infrastructure.
 2298	 * Refresh it now.
2299 */
2300 ret = pci_assign_resource(pdev, BAR_NUM);
2301 if (ret) {
2302 ath10k_err("cannot assign PCI space: %d\n", ret);
2303 goto err_ar;
2304 }
2305
2306 ret = pci_enable_device(pdev);
2307 if (ret) {
2308 ath10k_err("cannot enable PCI device: %d\n", ret);
2309 goto err_ar;
2310 }
2311
2312 /* Request MMIO resources */
2313 ret = pci_request_region(pdev, BAR_NUM, "ath");
2314 if (ret) {
2315 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2316 goto err_device;
2317 }
2318
2319 /*
2320 * Target structures have a limit of 32 bit DMA pointers.
2321 * DMA pointers can be wider than 32 bits by default on some systems.
2322 */
2323 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2324 if (ret) {
2325 ath10k_err("32-bit DMA not available: %d\n", ret);
2326 goto err_region;
2327 }
2328
2329 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2330 if (ret) {
2331 ath10k_err("cannot enable 32-bit consistent DMA\n");
2332 goto err_region;
2333 }
2334
2335 /* Set bus master bit in PCI_COMMAND to enable DMA */
2336 pci_set_master(pdev);
2337
2338 /*
2339 * Temporary FIX: disable ASPM
2340 * Will be removed after the OTP is programmed
2341 */
2342 pci_read_config_dword(pdev, 0x80, &lcr_val);
2343 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2344
2345 /* Arrange for access to Target SoC registers. */
2346 mem = pci_iomap(pdev, BAR_NUM, 0);
2347 if (!mem) {
2348 ath10k_err("PCI iomap error\n");
2349 ret = -EIO;
2350 goto err_master;
2351 }
2352
2353 ar_pci->mem = mem;
2354
2355 spin_lock_init(&ar_pci->ce_lock);
2356
2357 ar_pci->cacheline_sz = dma_get_cache_alignment();
2358
2359 ret = ath10k_pci_start_intr(ar);
2360 if (ret) {
2361 ath10k_err("could not start interrupt handling (%d)\n", ret);
2362 goto err_iomap;
2363 }
2364
Kalle Valo5e3dd152013-06-12 20:52:10 +03002365 ret = ath10k_core_register(ar);
2366 if (ret) {
2367 ath10k_err("could not register driver core (%d)\n", ret);
Michal Kazior818bdd12013-07-16 09:38:57 +02002368 goto err_intr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002369 }
2370
2371 return 0;
2372
Kalle Valo5e3dd152013-06-12 20:52:10 +03002373err_intr:
2374 ath10k_pci_stop_intr(ar);
2375err_iomap:
2376 pci_iounmap(pdev, mem);
2377err_master:
2378 pci_clear_master(pdev);
2379err_region:
2380 pci_release_region(pdev, BAR_NUM);
2381err_device:
2382 pci_disable_device(pdev);
2383err_ar:
2384 pci_set_drvdata(pdev, NULL);
2385 ath10k_core_destroy(ar);
2386err_ar_pci:
2387 /* call HIF PCI free here */
2388 kfree(ar_pci);
2389
2390 return ret;
2391}
2392
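/*
 * Tear down in reverse order of probe: unregister from the core, stop
 * interrupt handling, unmap MMIO and release the PCI resources.
 */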
2393static void ath10k_pci_remove(struct pci_dev *pdev)
2394{
2395 struct ath10k *ar = pci_get_drvdata(pdev);
2396 struct ath10k_pci *ar_pci;
2397
2398 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2399
2400 if (!ar)
2401 return;
2402
2403 ar_pci = ath10k_pci_priv(ar);
2404
2405 if (!ar_pci)
2406 return;
2407
2408 tasklet_kill(&ar_pci->msi_fw_err);
2409
2410 ath10k_core_unregister(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002411 ath10k_pci_stop_intr(ar);
2412
2413 pci_set_drvdata(pdev, NULL);
2414 pci_iounmap(pdev, ar_pci->mem);
2415 pci_release_region(pdev, BAR_NUM);
2416 pci_clear_master(pdev);
2417 pci_disable_device(pdev);
2418
2419 ath10k_core_destroy(ar);
2420 kfree(ar_pci);
2421}
2422
Kalle Valo5e3dd152013-06-12 20:52:10 +03002423MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2424
2425static struct pci_driver ath10k_pci_driver = {
2426 .name = "ath10k_pci",
2427 .id_table = ath10k_pci_id_table,
2428 .probe = ath10k_pci_probe,
2429 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002430};
2431
2432static int __init ath10k_pci_init(void)
2433{
2434 int ret;
2435
2436 ret = pci_register_driver(&ath10k_pci_driver);
2437 if (ret)
2438 ath10k_err("pci_register_driver failed [%d]\n", ret);
2439
2440 return ret;
2441}
2442module_init(ath10k_pci_init);
2443
2444static void __exit ath10k_pci_exit(void)
2445{
2446 pci_unregister_driver(&ath10k_pci_driver);
2447}
2448
2449module_exit(ath10k_pci_exit);
2450
2451MODULE_AUTHOR("Qualcomm Atheros");
2452MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2453MODULE_LICENSE("Dual BSD/GPL");
2454MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2455MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2456MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2457MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2458MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2459MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);