1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
22
23#include "core.h"
24#include "debug.h"
25
26#include "targaddrs.h"
27#include "bmi.h"
28
29#include "hif.h"
30#include "htc.h"
31
32#include "ce.h"
33#include "pci.h"
34
35unsigned int ath10k_target_ps;
36module_param(ath10k_target_ps, uint, 0644);
37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38
39#define QCA988X_1_0_DEVICE_ID (0xabcd)
40#define QCA988X_2_0_DEVICE_ID (0x003c)
41
42static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45 {0}
46};
47
48static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
49 u32 *data);
50
51static void ath10k_pci_process_ce(struct ath10k *ar);
52static int ath10k_pci_post_rx(struct ath10k *ar);
53static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54 int num);
55static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56static void ath10k_pci_stop_ce(struct ath10k *ar);
57static void ath10k_pci_device_reset(struct ath10k *ar);
58static int ath10k_pci_reset_target(struct ath10k *ar);
59
60static const struct ce_attr host_ce_config_wlan[] = {
61 /* host->target HTC control and raw streams */
62 { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
63 /* could be moved to share CE3 */
64 /* target->host HTT + HTC control */
65 { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
66 /* target->host WMI */
67 { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
68 /* host->target WMI */
69 { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
70 /* host->target HTT */
71 { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
72 CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
73 /* unused */
74 { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
75 /* Target autonomous hif_memcpy */
76 { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
77 /* ce_diag, the Diagnostic Window */
78 { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
79};
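/*
 * Layout note (added for readability, not authoritative): judging by how
 * attr->flags, attr->src_nentries, attr->src_sz_max and attr->dest_nentries
 * are used later in this file, each entry above appears to be
 * { flags, <unused>, src_nentries, src_sz_max, dest_nentries, <unused> },
 * with src_sz_max doubling as the per-buffer size that ath10k_pci_ce_init()
 * copies into pipe_info->buf_sz.
 */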
80
81/* Target firmware's Copy Engine configuration. */
82static const struct ce_pipe_config target_ce_config_wlan[] = {
83 /* host->target HTC control and raw streams */
84 { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
85 /* target->host HTT + HTC control */
86 { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
87 /* target->host WMI */
88 { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
89 /* host->target WMI */
90 { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
91 /* host->target HTT */
92 { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
93 /* NB: 50% of src nentries, since tx has 2 frags */
94 /* unused */
95 { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
96 /* Reserved for target autonomous hif_memcpy */
97 { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
98 /* CE7 used only by Host */
99};
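/*
 * Layout note (inferred from the values above, treat as a sketch): each
 * target-side entry looks like { pipe number, direction, nentries, max
 * transfer size in bytes, flags, reserved }. CE5 has no host-side buffers
 * ("unused" above) but is still described here so the firmware knows its
 * geometry.
 */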
100
101/*
102 * Diagnostic read/write access is provided for startup/config/debug usage.
103 * Caller must guarantee proper alignment, when applicable, and single user
104 * at any moment.
105 */
106static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
107 int nbytes)
108{
109 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
110 int ret = 0;
111 u32 buf;
112 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
113 unsigned int id;
114 unsigned int flags;
115 struct ce_state *ce_diag;
116 /* Host buffer address in CE space */
117 u32 ce_data;
118 dma_addr_t ce_data_base = 0;
119 void *data_buf = NULL;
120 int i;
121
122 /*
123 * This code cannot handle reads to non-memory space. Redirect to the
124 * register read fn but preserve the multi word read capability of
125 * this fn
126 */
127 if (address < DRAM_BASE_ADDRESS) {
128 if (!IS_ALIGNED(address, 4) ||
129 !IS_ALIGNED((unsigned long)data, 4))
130 return -EIO;
131
132 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
133 ar, address, (u32 *)data)) == 0)) {
134 nbytes -= sizeof(u32);
135 address += sizeof(u32);
136 data += sizeof(u32);
137 }
138 return ret;
139 }
140
141 ce_diag = ar_pci->ce_diag;
142
143 /*
144 * Allocate a temporary bounce buffer to hold caller's data
145 * to be DMA'ed from Target. This guarantees
146 * 1) 4-byte alignment
147 * 2) Buffer in DMA-able space
148 */
149 orig_nbytes = nbytes;
150 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
151 orig_nbytes,
152 &ce_data_base);
153
154 if (!data_buf) {
155 ret = -ENOMEM;
156 goto done;
157 }
158 memset(data_buf, 0, orig_nbytes);
159
160 remaining_bytes = orig_nbytes;
161 ce_data = ce_data_base;
162 while (remaining_bytes) {
163 nbytes = min_t(unsigned int, remaining_bytes,
164 DIAG_TRANSFER_LIMIT);
165
166 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
167 if (ret != 0)
168 goto done;
169
170 /* Request CE to send from Target(!) address to Host buffer */
171 /*
172 * The address supplied by the caller is in the
173 * Target CPU virtual address space.
174 *
175 * In order to use this address with the diagnostic CE,
176 * convert it from Target CPU virtual address space
177 * to CE address space
178 */
179 ath10k_pci_wake(ar);
180 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
181 address);
182 ath10k_pci_sleep(ar);
183
184 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
185 0);
186 if (ret)
187 goto done;
188
189 i = 0;
190 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
191 &completed_nbytes,
192 &id) != 0) {
193 mdelay(1);
194 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
195 ret = -EBUSY;
196 goto done;
197 }
198 }
199
200 if (nbytes != completed_nbytes) {
201 ret = -EIO;
202 goto done;
203 }
204
205 if (buf != (u32) address) {
206 ret = -EIO;
207 goto done;
208 }
209
210 i = 0;
211 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
212 &completed_nbytes,
213 &id, &flags) != 0) {
214 mdelay(1);
215
216 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
217 ret = -EBUSY;
218 goto done;
219 }
220 }
221
222 if (nbytes != completed_nbytes) {
223 ret = -EIO;
224 goto done;
225 }
226
227 if (buf != ce_data) {
228 ret = -EIO;
229 goto done;
230 }
231
232 remaining_bytes -= nbytes;
233 address += nbytes;
234 ce_data += nbytes;
235 }
236
237done:
238 if (ret == 0) {
239 /* Copy data from allocated DMA buf to caller's buf */
240 WARN_ON_ONCE(orig_nbytes & 3);
241 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
242 ((u32 *)data)[i] =
243 __le32_to_cpu(((__le32 *)data_buf)[i]);
244 }
245 } else
246 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
247 __func__, address);
248
249 if (data_buf)
250 pci_free_consistent(ar_pci->pdev, orig_nbytes,
251 data_buf, ce_data_base);
252
253 return ret;
254}
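/*
 * Usage sketch (illustrative only, not called from this file): given a
 * struct ath10k *ar, a block of Target DRAM could be read with
 *
 *	u32 buf[16];
 *	int ret;
 *
 *	ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS, buf,
 *				       sizeof(buf));
 *	if (ret)
 *		ath10k_warn("diag read failed: %d\n", ret);
 *
 * The caller must provide 4-byte alignment and serialize access to the
 * diagnostic CE, as noted in the comment above the function.
 */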
255
256/* Read 4-byte aligned data from Target memory or register */
257static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
258 u32 *data)
259{
260 /* Assume range doesn't cross this boundary */
261 if (address >= DRAM_BASE_ADDRESS)
262 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
263
264 ath10k_pci_wake(ar);
265 *data = ath10k_pci_read32(ar, address);
266 ath10k_pci_sleep(ar);
267 return 0;
268}
269
270static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
271 const void *data, int nbytes)
272{
273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
274 int ret = 0;
275 u32 buf;
276 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
277 unsigned int id;
278 unsigned int flags;
279 struct ce_state *ce_diag;
280 void *data_buf = NULL;
281 u32 ce_data; /* Host buffer address in CE space */
282 dma_addr_t ce_data_base = 0;
283 int i;
284
285 ce_diag = ar_pci->ce_diag;
286
287 /*
288 * Allocate a temporary bounce buffer to hold caller's data
289 * to be DMA'ed to Target. This guarantees
290 * 1) 4-byte alignment
291 * 2) Buffer in DMA-able space
292 */
293 orig_nbytes = nbytes;
294 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
295 orig_nbytes,
296 &ce_data_base);
297 if (!data_buf) {
298 ret = -ENOMEM;
299 goto done;
300 }
301
302 /* Copy caller's data to allocated DMA buf */
303 WARN_ON_ONCE(orig_nbytes & 3);
304 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
305 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
306
307 /*
308 * The address supplied by the caller is in the
309 * Target CPU virtual address space.
310 *
311 * In order to use this address with the diagnostic CE,
312 * convert it from
313 * Target CPU virtual address space
314 * to
315 * CE address space
316 */
317 ath10k_pci_wake(ar);
318 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
319 ath10k_pci_sleep(ar);
320
321 remaining_bytes = orig_nbytes;
322 ce_data = ce_data_base;
323 while (remaining_bytes) {
324 /* FIXME: check cast */
325 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
326
327 /* Set up to receive directly into Target(!) address */
328 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
329 if (ret != 0)
330 goto done;
331
332 /*
333 * Request CE to send caller-supplied data that
334 * was copied to bounce buffer to Target(!) address.
335 */
336 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
337 nbytes, 0, 0);
338 if (ret != 0)
339 goto done;
340
341 i = 0;
342 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
343 &completed_nbytes,
344 &id) != 0) {
345 mdelay(1);
346
347 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
348 ret = -EBUSY;
349 goto done;
350 }
351 }
352
353 if (nbytes != completed_nbytes) {
354 ret = -EIO;
355 goto done;
356 }
357
358 if (buf != ce_data) {
359 ret = -EIO;
360 goto done;
361 }
362
363 i = 0;
364 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
365 &completed_nbytes,
366 &id, &flags) != 0) {
367 mdelay(1);
368
369 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
370 ret = -EBUSY;
371 goto done;
372 }
373 }
374
375 if (nbytes != completed_nbytes) {
376 ret = -EIO;
377 goto done;
378 }
379
380 if (buf != address) {
381 ret = -EIO;
382 goto done;
383 }
384
385 remaining_bytes -= nbytes;
386 address += nbytes;
387 ce_data += nbytes;
388 }
389
390done:
391 if (data_buf) {
392 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
393 ce_data_base);
394 }
395
396 if (ret != 0)
397 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
398 address);
399
400 return ret;
401}
402
403/* Write 4B data to Target memory or register */
404static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
405 u32 data)
406{
407 /* Assume range doesn't cross this boundary */
408 if (address >= DRAM_BASE_ADDRESS)
409 return ath10k_pci_diag_write_mem(ar, address, &data,
410 sizeof(u32));
411
412 ath10k_pci_wake(ar);
413 ath10k_pci_write32(ar, address, data);
414 ath10k_pci_sleep(ar);
415 return 0;
416}
417
418static bool ath10k_pci_target_is_awake(struct ath10k *ar)
419{
420 void __iomem *mem = ath10k_pci_priv(ar)->mem;
421 u32 val;
422 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
423 RTC_STATE_ADDRESS);
424 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
425}
426
427static void ath10k_pci_wait(struct ath10k *ar)
428{
429 int n = 100;
430
431 while (n-- && !ath10k_pci_target_is_awake(ar))
432 msleep(10);
433
434 if (n < 0)
435 ath10k_warn("Unable to wakeup target\n");
436}
437
438void ath10k_do_pci_wake(struct ath10k *ar)
439{
440 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
441 void __iomem *pci_addr = ar_pci->mem;
442 int tot_delay = 0;
443 int curr_delay = 5;
444
445 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
446 /* Force AWAKE */
447 iowrite32(PCIE_SOC_WAKE_V_MASK,
448 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
449 PCIE_SOC_WAKE_ADDRESS);
450 }
451 atomic_inc(&ar_pci->keep_awake_count);
452
453 if (ar_pci->verified_awake)
454 return;
455
456 for (;;) {
457 if (ath10k_pci_target_is_awake(ar)) {
458 ar_pci->verified_awake = true;
459 break;
460 }
461
462 if (tot_delay > PCIE_WAKE_TIMEOUT) {
463 ath10k_warn("target takes too long to wake up (awake count %d)\n",
464 atomic_read(&ar_pci->keep_awake_count));
465 break;
466 }
467
468 udelay(curr_delay);
469 tot_delay += curr_delay;
470
471 if (curr_delay < 50)
472 curr_delay += 5;
473 }
474}
475
476void ath10k_do_pci_sleep(struct ath10k *ar)
477{
478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
479 void __iomem *pci_addr = ar_pci->mem;
480
481 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
482 /* Allow sleep */
483 ar_pci->verified_awake = false;
484 iowrite32(PCIE_SOC_WAKE_RESET,
485 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
486 PCIE_SOC_WAKE_ADDRESS);
487 }
488}
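/*
 * Usage sketch (mirrors how the rest of this file pairs the calls):
 * register accesses from process context are wrapped as
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, address);
 *	ath10k_pci_sleep(ar);
 *
 * keep_awake_count makes nesting safe: only the first wake forces the
 * SoC awake and only the matching final sleep lets it doze again.
 */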
489
490/*
491 * FIXME: Handle OOM properly.
492 */
493static inline
494struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
495{
496 struct ath10k_pci_compl *compl = NULL;
497
498 spin_lock_bh(&pipe_info->pipe_lock);
499 if (list_empty(&pipe_info->compl_free)) {
500 ath10k_warn("Completion buffers are full\n");
501 goto exit;
502 }
503 compl = list_first_entry(&pipe_info->compl_free,
504 struct ath10k_pci_compl, list);
505 list_del(&compl->list);
506exit:
507 spin_unlock_bh(&pipe_info->pipe_lock);
508 return compl;
509}
510
511/* Called by lower (CE) layer when a send to Target completes. */
512static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
513 void *transfer_context,
514 u32 ce_data,
515 unsigned int nbytes,
516 unsigned int transfer_id)
517{
518 struct ath10k *ar = ce_state->ar;
519 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
520 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
521 struct ath10k_pci_compl *compl;
522 bool process = false;
523
524 do {
525 /*
526 * For the send completion of an item in sendlist, just
527 * increment num_sends_allowed. The upper layer callback will
528 * be triggered when last fragment is done with send.
529 */
530 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
531 spin_lock_bh(&pipe_info->pipe_lock);
532 pipe_info->num_sends_allowed++;
533 spin_unlock_bh(&pipe_info->pipe_lock);
534 continue;
535 }
536
537 compl = get_free_compl(pipe_info);
538 if (!compl)
539 break;
540
541 compl->send_or_recv = HIF_CE_COMPLETE_SEND;
542 compl->ce_state = ce_state;
543 compl->pipe_info = pipe_info;
544 compl->transfer_context = transfer_context;
545 compl->nbytes = nbytes;
546 compl->transfer_id = transfer_id;
547 compl->flags = 0;
548
549 /*
550 * Add the completion to the processing queue.
551 */
552 spin_lock_bh(&ar_pci->compl_lock);
553 list_add_tail(&compl->list, &ar_pci->compl_process);
554 spin_unlock_bh(&ar_pci->compl_lock);
555
556 process = true;
557 } while (ath10k_ce_completed_send_next(ce_state,
558 &transfer_context,
559 &ce_data, &nbytes,
560 &transfer_id) == 0);
561
562 /*
563 * If only some of the items within a sendlist have completed,
564 * don't invoke completion processing until the entire sendlist
565 * has been sent.
566 */
567 if (!process)
568 return;
569
570 ath10k_pci_process_ce(ar);
571}
572
573/* Called by lower (CE) layer when data is received from the Target. */
574static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
575 void *transfer_context, u32 ce_data,
576 unsigned int nbytes,
577 unsigned int transfer_id,
578 unsigned int flags)
579{
580 struct ath10k *ar = ce_state->ar;
581 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
582 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
583 struct ath10k_pci_compl *compl;
584 struct sk_buff *skb;
585
586 do {
587 compl = get_free_compl(pipe_info);
588 if (!compl)
589 break;
590
591 compl->send_or_recv = HIF_CE_COMPLETE_RECV;
592 compl->ce_state = ce_state;
593 compl->pipe_info = pipe_info;
594 compl->transfer_context = transfer_context;
595 compl->nbytes = nbytes;
596 compl->transfer_id = transfer_id;
597 compl->flags = flags;
598
599 skb = transfer_context;
600 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
601 skb->len + skb_tailroom(skb),
602 DMA_FROM_DEVICE);
603 /*
604 * Add the completion to the processing queue.
605 */
606 spin_lock_bh(&ar_pci->compl_lock);
607 list_add_tail(&compl->list, &ar_pci->compl_process);
608 spin_unlock_bh(&ar_pci->compl_lock);
609
610 } while (ath10k_ce_completed_recv_next(ce_state,
611 &transfer_context,
612 &ce_data, &nbytes,
613 &transfer_id,
614 &flags) == 0);
615
616 ath10k_pci_process_ce(ar);
617}
618
619/* Send the first nbytes bytes of the buffer */
620static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
621 unsigned int transfer_id,
622 unsigned int bytes, struct sk_buff *nbuf)
623{
624 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
625 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
626 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
627 struct ce_state *ce_hdl = pipe_info->ce_hdl;
628 struct ce_sendlist sendlist;
629 unsigned int len;
630 u32 flags = 0;
631 int ret;
632
633 memset(&sendlist, 0, sizeof(struct ce_sendlist));
634
635 len = min(bytes, nbuf->len);
636 bytes -= len;
637
638 if (len & 3)
639 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
640
641 ath10k_dbg(ATH10K_DBG_PCI,
642 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
643 nbuf->data, (unsigned long long) skb_cb->paddr,
644 nbuf->len, len);
645 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
646 "ath10k tx: data: ",
647 nbuf->data, nbuf->len);
648
649 ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
650
651 /* Make sure we have resources to handle this request */
652 spin_lock_bh(&pipe_info->pipe_lock);
653 if (!pipe_info->num_sends_allowed) {
654 ath10k_warn("Pipe: %d is full\n", pipe_id);
655 spin_unlock_bh(&pipe_info->pipe_lock);
656 return -ENOSR;
657 }
658 pipe_info->num_sends_allowed--;
659 spin_unlock_bh(&pipe_info->pipe_lock);
660
661 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
662 if (ret)
663 ath10k_warn("CE send failed: %p\n", nbuf);
664
665 return ret;
666}
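/*
 * Example call (hypothetical caller, shown only to document the contract):
 * an upper layer that has already DMA-mapped skb->data and stored the
 * physical address in ATH10K_SKB_CB(skb)->paddr would do something like
 *
 *	ret = ath10k_pci_hif_send_head(ar, ul_pipe, transfer_id,
 *				       skb->len, skb);
 *
 * where ul_pipe comes from ath10k_pci_hif_map_service_to_pipe().
 */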
667
668static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
669{
670 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
671 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
672 int ret;
673
674 spin_lock_bh(&pipe_info->pipe_lock);
675 ret = pipe_info->num_sends_allowed;
676 spin_unlock_bh(&pipe_info->pipe_lock);
677
678 return ret;
679}
680
681static void ath10k_pci_hif_dump_area(struct ath10k *ar)
682{
683 u32 reg_dump_area = 0;
684 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
685 u32 host_addr;
686 int ret;
687 u32 i;
688
689 ath10k_err("firmware crashed!\n");
690 ath10k_err("hardware name %s version 0x%x\n",
691 ar->hw_params.name, ar->target_version);
692 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
693 ar->fw_version_minor, ar->fw_version_release,
694 ar->fw_version_build);
695
696 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
697 if (ath10k_pci_diag_read_mem(ar, host_addr,
698 &reg_dump_area, sizeof(u32)) != 0) {
699 ath10k_warn("could not read hi_failure_state\n");
700 return;
701 }
702
703 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
704
705 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
706 &reg_dump_values[0],
707 REG_DUMP_COUNT_QCA988X * sizeof(u32));
708 if (ret != 0) {
709 ath10k_err("could not dump FW Dump Area\n");
710 return;
711 }
712
713 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
714
715 ath10k_err("target Register Dump\n");
716 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
717 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
718 i,
719 reg_dump_values[i],
720 reg_dump_values[i + 1],
721 reg_dump_values[i + 2],
722 reg_dump_values[i + 3]);
723
724 ieee80211_queue_work(ar->hw, &ar->restart_work);
725}
726
727static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
728 int force)
729{
730 if (!force) {
731 int resources;
732 /*
733 * Decide whether to actually poll for completions, or just
734 * wait for a later chance.
735 * If there seem to be plenty of resources left, then just wait
736 * since checking involves reading a CE register, which is a
737 * relatively expensive operation.
738 */
739 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
740
741 /*
742 * If at least 50% of the total resources are still available,
743 * don't bother checking again yet.
744 */
745 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
746 return;
747 }
748 ath10k_ce_per_engine_service(ar, pipe);
749}
750
751static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
752 struct ath10k_hif_cb *callbacks)
753{
754 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
755
756 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
757
758 memcpy(&ar_pci->msg_callbacks_current, callbacks,
759 sizeof(ar_pci->msg_callbacks_current));
760}
761
762static int ath10k_pci_start_ce(struct ath10k *ar)
763{
764 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
765 struct ce_state *ce_diag = ar_pci->ce_diag;
766 const struct ce_attr *attr;
767 struct hif_ce_pipe_info *pipe_info;
768 struct ath10k_pci_compl *compl;
769 int i, pipe_num, completions, disable_interrupts;
770
771 spin_lock_init(&ar_pci->compl_lock);
772 INIT_LIST_HEAD(&ar_pci->compl_process);
773
774 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
775 pipe_info = &ar_pci->pipe_info[pipe_num];
776
777 spin_lock_init(&pipe_info->pipe_lock);
778 INIT_LIST_HEAD(&pipe_info->compl_free);
779
780 /* Handle Diagnostic CE specially */
781 if (pipe_info->ce_hdl == ce_diag)
782 continue;
783
784 attr = &host_ce_config_wlan[pipe_num];
785 completions = 0;
786
787 if (attr->src_nentries) {
788 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
789 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
790 ath10k_pci_ce_send_done,
791 disable_interrupts);
792 completions += attr->src_nentries;
793 pipe_info->num_sends_allowed = attr->src_nentries - 1;
794 }
795
796 if (attr->dest_nentries) {
797 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
798 ath10k_pci_ce_recv_data);
799 completions += attr->dest_nentries;
800 }
801
802 if (completions == 0)
803 continue;
804
805 for (i = 0; i < completions; i++) {
806 compl = kmalloc(sizeof(struct ath10k_pci_compl),
807 GFP_KERNEL);
808 if (!compl) {
809 ath10k_warn("No memory for completion state\n");
810 ath10k_pci_stop_ce(ar);
811 return -ENOMEM;
812 }
813
814 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
815 list_add_tail(&compl->list, &pipe_info->compl_free);
816 }
817 }
818
819 return 0;
820}
821
822static void ath10k_pci_stop_ce(struct ath10k *ar)
823{
824 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
825 struct ath10k_pci_compl *compl;
826 struct sk_buff *skb;
827 int i;
828
829 ath10k_ce_disable_interrupts(ar);
830
831 /* Cancel the pending tasklet */
832 tasklet_kill(&ar_pci->intr_tq);
833
834 for (i = 0; i < CE_COUNT; i++)
835 tasklet_kill(&ar_pci->pipe_info[i].intr);
836
837 /* Mark pending completions as aborted, so that upper layers free up
838 * their associated resources */
839 spin_lock_bh(&ar_pci->compl_lock);
840 list_for_each_entry(compl, &ar_pci->compl_process, list) {
841 skb = (struct sk_buff *)compl->transfer_context;
842 ATH10K_SKB_CB(skb)->is_aborted = true;
843 }
844 spin_unlock_bh(&ar_pci->compl_lock);
845}
846
847static void ath10k_pci_cleanup_ce(struct ath10k *ar)
848{
849 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
850 struct ath10k_pci_compl *compl, *tmp;
851 struct hif_ce_pipe_info *pipe_info;
852 struct sk_buff *netbuf;
853 int pipe_num;
854
855 /* Free pending completions. */
856 spin_lock_bh(&ar_pci->compl_lock);
857 if (!list_empty(&ar_pci->compl_process))
858 ath10k_warn("pending completions still present! possible memory leaks.\n");
859
860 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
861 list_del(&compl->list);
862 netbuf = (struct sk_buff *)compl->transfer_context;
863 dev_kfree_skb_any(netbuf);
864 kfree(compl);
865 }
866 spin_unlock_bh(&ar_pci->compl_lock);
867
868 /* Free unused completions for each pipe. */
869 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
870 pipe_info = &ar_pci->pipe_info[pipe_num];
871
872 spin_lock_bh(&pipe_info->pipe_lock);
873 list_for_each_entry_safe(compl, tmp,
874 &pipe_info->compl_free, list) {
875 list_del(&compl->list);
876 kfree(compl);
877 }
878 spin_unlock_bh(&pipe_info->pipe_lock);
879 }
880}
881
882static void ath10k_pci_process_ce(struct ath10k *ar)
883{
884 struct ath10k_pci *ar_pci = ar->hif.priv;
885 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
886 struct ath10k_pci_compl *compl;
887 struct sk_buff *skb;
888 unsigned int nbytes;
889 int ret, send_done = 0;
890
891 /* Upper layers aren't ready to handle tx/rx completions in parallel so
892 * we must serialize all completion processing. */
893
894 spin_lock_bh(&ar_pci->compl_lock);
895 if (ar_pci->compl_processing) {
896 spin_unlock_bh(&ar_pci->compl_lock);
897 return;
898 }
899 ar_pci->compl_processing = true;
900 spin_unlock_bh(&ar_pci->compl_lock);
901
902 for (;;) {
903 spin_lock_bh(&ar_pci->compl_lock);
904 if (list_empty(&ar_pci->compl_process)) {
905 spin_unlock_bh(&ar_pci->compl_lock);
906 break;
907 }
908 compl = list_first_entry(&ar_pci->compl_process,
909 struct ath10k_pci_compl, list);
910 list_del(&compl->list);
911 spin_unlock_bh(&ar_pci->compl_lock);
912
913 if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
914 cb->tx_completion(ar,
915 compl->transfer_context,
916 compl->transfer_id);
917 send_done = 1;
918 } else {
919 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
920 if (ret) {
921 ath10k_warn("Unable to post recv buffer for pipe: %d\n",
922 compl->pipe_info->pipe_num);
923 break;
924 }
925
926 skb = (struct sk_buff *)compl->transfer_context;
927 nbytes = compl->nbytes;
928
929 ath10k_dbg(ATH10K_DBG_PCI,
930 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
931 skb, nbytes);
932 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
933 "ath10k rx: ", skb->data, nbytes);
934
935 if (skb->len + skb_tailroom(skb) >= nbytes) {
936 skb_trim(skb, 0);
937 skb_put(skb, nbytes);
938 cb->rx_completion(ar, skb,
939 compl->pipe_info->pipe_num);
940 } else {
941 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
942 nbytes,
943 skb->len + skb_tailroom(skb));
944 }
945 }
946
947 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
948
949 /*
950 * Add completion back to the pipe's free list.
951 */
952 spin_lock_bh(&compl->pipe_info->pipe_lock);
953 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
954 compl->pipe_info->num_sends_allowed += send_done;
955 spin_unlock_bh(&compl->pipe_info->pipe_lock);
956 }
957
958 spin_lock_bh(&ar_pci->compl_lock);
959 ar_pci->compl_processing = false;
960 spin_unlock_bh(&ar_pci->compl_lock);
961}
962
963/* TODO - temporary mapping while we have too few CE's */
964static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
965 u16 service_id, u8 *ul_pipe,
966 u8 *dl_pipe, int *ul_is_polled,
967 int *dl_is_polled)
968{
969 int ret = 0;
970
971 /* polling for received messages not supported */
972 *dl_is_polled = 0;
973
974 switch (service_id) {
975 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
976 /*
977 * Host->target HTT gets its own pipe, so it can be polled
978 * while other pipes are interrupt driven.
979 */
980 *ul_pipe = 4;
981 /*
982 * Use the same target->host pipe for HTC ctrl, HTC raw
983 * streams, and HTT.
984 */
985 *dl_pipe = 1;
986 break;
987
988 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
989 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
990 /*
991 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
992 * HTC_CTRL_RSVD_SVC could share the same pipe as the
993 * WMI services. So, if another CE is needed, change
994 * this to *ul_pipe = 3, which frees up CE 0.
995 */
996 /* *ul_pipe = 3; */
997 *ul_pipe = 0;
998 *dl_pipe = 1;
999 break;
1000
1001 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1002 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1003 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1004 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1005
1006 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1007 *ul_pipe = 3;
1008 *dl_pipe = 2;
1009 break;
1010
1011 /* pipe 5 unused */
1012 /* pipe 6 reserved */
1013 /* pipe 7 reserved */
1014
1015 default:
1016 ret = -1;
1017 break;
1018 }
1019 *ul_is_polled =
1020 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1021
1022 return ret;
1023}
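/*
 * Summary of the mapping above (host-side view, added for readability):
 * HTT data uses CE4 (out) / CE1 (in); WMI control and all WMI data
 * services use CE3 (out) / CE2 (in); HTC control and raw streams
 * currently use CE0 (out) / CE1 (in). Unknown service ids are rejected.
 */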
1024
1025static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1026 u8 *ul_pipe, u8 *dl_pipe)
1027{
1028 int ul_is_polled, dl_is_polled;
1029
1030 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1031 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1032 ul_pipe,
1033 dl_pipe,
1034 &ul_is_polled,
1035 &dl_is_polled);
1036}
1037
1038static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
1039 int num)
1040{
1041 struct ath10k *ar = pipe_info->hif_ce_state;
1042 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1043 struct ce_state *ce_state = pipe_info->ce_hdl;
1044 struct sk_buff *skb;
1045 dma_addr_t ce_data;
1046 int i, ret = 0;
1047
1048 if (pipe_info->buf_sz == 0)
1049 return 0;
1050
1051 for (i = 0; i < num; i++) {
1052 skb = dev_alloc_skb(pipe_info->buf_sz);
1053 if (!skb) {
1054 ath10k_warn("could not allocate skbuff for pipe %d\n",
1055 num);
1056 ret = -ENOMEM;
1057 goto err;
1058 }
1059
1060 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1061
1062 ce_data = dma_map_single(ar->dev, skb->data,
1063 skb->len + skb_tailroom(skb),
1064 DMA_FROM_DEVICE);
1065
1066 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1067 ath10k_warn("could not dma map skbuff\n");
1068 dev_kfree_skb_any(skb);
1069 ret = -EIO;
1070 goto err;
1071 }
1072
1073 ATH10K_SKB_CB(skb)->paddr = ce_data;
1074
1075 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1076 pipe_info->buf_sz,
1077 PCI_DMA_FROMDEVICE);
1078
1079 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1080 ce_data);
1081 if (ret) {
1082 ath10k_warn("could not enqueue to pipe %d (%d)\n",
1083 num, ret);
1084 goto err;
1085 }
1086 }
1087
1088 return ret;
1089
1090err:
1091 ath10k_pci_rx_pipe_cleanup(pipe_info);
1092 return ret;
1093}
1094
1095static int ath10k_pci_post_rx(struct ath10k *ar)
1096{
1097 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1098 struct hif_ce_pipe_info *pipe_info;
1099 const struct ce_attr *attr;
1100 int pipe_num, ret = 0;
1101
1102 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1103 pipe_info = &ar_pci->pipe_info[pipe_num];
1104 attr = &host_ce_config_wlan[pipe_num];
1105
1106 if (attr->dest_nentries == 0)
1107 continue;
1108
1109 ret = ath10k_pci_post_rx_pipe(pipe_info,
1110 attr->dest_nentries - 1);
1111 if (ret) {
1112 ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1113 pipe_num);
1114
1115 for (; pipe_num >= 0; pipe_num--) {
1116 pipe_info = &ar_pci->pipe_info[pipe_num];
1117 ath10k_pci_rx_pipe_cleanup(pipe_info);
1118 }
1119 return ret;
1120 }
1121 }
1122
1123 return 0;
1124}
1125
1126static int ath10k_pci_hif_start(struct ath10k *ar)
1127{
1128 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1129 int ret;
1130
1131 ret = ath10k_pci_start_ce(ar);
1132 if (ret) {
1133 ath10k_warn("could not start CE (%d)\n", ret);
1134 return ret;
1135 }
1136
1137 /* Post buffers once to start things off. */
1138 ret = ath10k_pci_post_rx(ar);
1139 if (ret) {
1140 ath10k_warn("could not post rx pipes (%d)\n", ret);
1141 return ret;
1142 }
1143
1144 ar_pci->started = 1;
1145 return 0;
1146}
1147
1148static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1149{
1150 struct ath10k *ar;
1151 struct ath10k_pci *ar_pci;
1152 struct ce_state *ce_hdl;
1153 u32 buf_sz;
1154 struct sk_buff *netbuf;
1155 u32 ce_data;
1156
1157 buf_sz = pipe_info->buf_sz;
1158
1159 /* Unused Copy Engine */
1160 if (buf_sz == 0)
1161 return;
1162
1163 ar = pipe_info->hif_ce_state;
1164 ar_pci = ath10k_pci_priv(ar);
1165
1166 if (!ar_pci->started)
1167 return;
1168
1169 ce_hdl = pipe_info->ce_hdl;
1170
1171 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1172 &ce_data) == 0) {
1173 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1174 netbuf->len + skb_tailroom(netbuf),
1175 DMA_FROM_DEVICE);
1176 dev_kfree_skb_any(netbuf);
1177 }
1178}
1179
1180static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1181{
1182 struct ath10k *ar;
1183 struct ath10k_pci *ar_pci;
1184 struct ce_state *ce_hdl;
1185 struct sk_buff *netbuf;
1186 u32 ce_data;
1187 unsigned int nbytes;
1188 unsigned int id;
1189 u32 buf_sz;
1190
1191 buf_sz = pipe_info->buf_sz;
1192
1193 /* Unused Copy Engine */
1194 if (buf_sz == 0)
1195 return;
1196
1197 ar = pipe_info->hif_ce_state;
1198 ar_pci = ath10k_pci_priv(ar);
1199
1200 if (!ar_pci->started)
1201 return;
1202
1203 ce_hdl = pipe_info->ce_hdl;
1204
1205 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1206 &ce_data, &nbytes, &id) == 0) {
1207 if (netbuf != CE_SENDLIST_ITEM_CTXT)
1208 /*
1209 * Indicate the completion to higher layer to free
1210 * the buffer
1211 */
1212 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1213 ar_pci->msg_callbacks_current.tx_completion(ar,
1214 netbuf,
1215 id);
1216 }
1217}
1218
1219/*
1220 * Cleanup residual buffers for device shutdown:
1221 * buffers that were enqueued for receive
1222 * buffers that were to be sent
1223 * Note: Buffers that had completed but which were
1224 * not yet processed are on a completion queue. They
1225 * are handled when the completion thread shuts down.
1226 */
1227static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1228{
1229 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1230 int pipe_num;
1231
1232 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1233 struct hif_ce_pipe_info *pipe_info;
1234
1235 pipe_info = &ar_pci->pipe_info[pipe_num];
1236 ath10k_pci_rx_pipe_cleanup(pipe_info);
1237 ath10k_pci_tx_pipe_cleanup(pipe_info);
1238 }
1239}
1240
1241static void ath10k_pci_ce_deinit(struct ath10k *ar)
1242{
1243 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1244 struct hif_ce_pipe_info *pipe_info;
1245 int pipe_num;
1246
1247 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1248 pipe_info = &ar_pci->pipe_info[pipe_num];
1249 if (pipe_info->ce_hdl) {
1250 ath10k_ce_deinit(pipe_info->ce_hdl);
1251 pipe_info->ce_hdl = NULL;
1252 pipe_info->buf_sz = 0;
1253 }
1254 }
1255}
1256
1257static void ath10k_pci_hif_stop(struct ath10k *ar)
1258{
1259 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1260
1261 ath10k_pci_stop_ce(ar);
1262
1263 /* At this point, asynchronous threads are stopped, the target should
1264 * not DMA nor interrupt. We process the leftovers and then free
1265 * everything else up. */
1266
1267 ath10k_pci_process_ce(ar);
1268 ath10k_pci_cleanup_ce(ar);
1269 ath10k_pci_buffer_cleanup(ar);
1270}
1271
1272static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1273 void *req, u32 req_len,
1274 void *resp, u32 *resp_len)
1275{
1276 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1277 struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1278 struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1279 dma_addr_t req_paddr = 0;
1280 dma_addr_t resp_paddr = 0;
1281 struct bmi_xfer xfer = {};
1282 void *treq, *tresp = NULL;
1283 int ret = 0;
1284
1285 if (resp && !resp_len)
1286 return -EINVAL;
1287
1288 if (resp && resp_len && *resp_len == 0)
1289 return -EINVAL;
1290
1291 treq = kmemdup(req, req_len, GFP_KERNEL);
1292 if (!treq)
1293 return -ENOMEM;
1294
1295 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1296 ret = dma_mapping_error(ar->dev, req_paddr);
1297 if (ret)
1298 goto err_dma;
1299
1300 if (resp && resp_len) {
1301 tresp = kzalloc(*resp_len, GFP_KERNEL);
1302 if (!tresp) {
1303 ret = -ENOMEM;
1304 goto err_req;
1305 }
1306
1307 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1308 DMA_FROM_DEVICE);
1309 ret = dma_mapping_error(ar->dev, resp_paddr);
1310 if (ret)
1311 goto err_req;
1312
1313 xfer.wait_for_resp = true;
1314 xfer.resp_len = 0;
1315
1316 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1317 }
1318
1319 init_completion(&xfer.done);
1320
1321 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1322 if (ret)
1323 goto err_resp;
1324
1325 ret = wait_for_completion_timeout(&xfer.done,
1326 BMI_COMMUNICATION_TIMEOUT_HZ);
1327 if (ret <= 0) {
1328 u32 unused_buffer;
1329 unsigned int unused_nbytes;
1330 unsigned int unused_id;
1331
1332 ret = -ETIMEDOUT;
1333 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1334 &unused_nbytes, &unused_id);
1335 } else {
1336 /* non-zero means we did not time out */
1337 ret = 0;
1338 }
1339
1340err_resp:
1341 if (resp) {
1342 u32 unused_buffer;
1343
1344 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1345 dma_unmap_single(ar->dev, resp_paddr,
1346 *resp_len, DMA_FROM_DEVICE);
1347 }
1348err_req:
1349 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1350
1351 if (ret == 0 && resp_len) {
1352 *resp_len = min(*resp_len, xfer.resp_len);
1353 memcpy(resp, tresp, xfer.resp_len);
1354 }
1355err_dma:
1356 kfree(treq);
1357 kfree(tresp);
1358
1359 return ret;
1360}
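/*
 * Round-trip sketch (hypothetical caller, req/resp are just example
 * names): the BMI layer would marshal a command and call
 *
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_pci_hif_exchange_bmi_msg(ar, &req, sizeof(req),
 *					      &resp, &resp_len);
 *
 * On success resp_len is trimmed to the number of bytes the Target
 * actually returned (xfer.resp_len above).
 */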
1361
1362static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1363 void *transfer_context,
1364 u32 data,
1365 unsigned int nbytes,
1366 unsigned int transfer_id)
1367{
1368 struct bmi_xfer *xfer = transfer_context;
1369
1370 if (xfer->wait_for_resp)
1371 return;
1372
1373 complete(&xfer->done);
1374}
1375
1376static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1377 void *transfer_context,
1378 u32 data,
1379 unsigned int nbytes,
1380 unsigned int transfer_id,
1381 unsigned int flags)
1382{
1383 struct bmi_xfer *xfer = transfer_context;
1384
1385 if (!xfer->wait_for_resp) {
1386 ath10k_warn("unexpected: BMI data received; ignoring\n");
1387 return;
1388 }
1389
1390 xfer->resp_len = nbytes;
1391 complete(&xfer->done);
1392}
1393
1394/*
1395 * Map from service/endpoint to Copy Engine.
1396 * This table is derived from the CE_PCI TABLE, above.
1397 * It is passed to the Target at startup for use by firmware.
1398 */
1399static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1400 {
1401 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1402 PIPEDIR_OUT, /* out = UL = host -> target */
1403 3,
1404 },
1405 {
1406 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1407 PIPEDIR_IN, /* in = DL = target -> host */
1408 2,
1409 },
1410 {
1411 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1412 PIPEDIR_OUT, /* out = UL = host -> target */
1413 3,
1414 },
1415 {
1416 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1417 PIPEDIR_IN, /* in = DL = target -> host */
1418 2,
1419 },
1420 {
1421 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1422 PIPEDIR_OUT, /* out = UL = host -> target */
1423 3,
1424 },
1425 {
1426 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1427 PIPEDIR_IN, /* in = DL = target -> host */
1428 2,
1429 },
1430 {
1431 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1432 PIPEDIR_OUT, /* out = UL = host -> target */
1433 3,
1434 },
1435 {
1436 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1437 PIPEDIR_IN, /* in = DL = target -> host */
1438 2,
1439 },
1440 {
1441 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1442 PIPEDIR_OUT, /* out = UL = host -> target */
1443 3,
1444 },
1445 {
1446 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1447 PIPEDIR_IN, /* in = DL = target -> host */
1448 2,
1449 },
1450 {
1451 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1452 PIPEDIR_OUT, /* out = UL = host -> target */
1453 0, /* could be moved to 3 (share with WMI) */
1454 },
1455 {
1456 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1457 PIPEDIR_IN, /* in = DL = target -> host */
1458 1,
1459 },
1460 {
1461 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1462 PIPEDIR_OUT, /* out = UL = host -> target */
1463 0,
1464 },
1465 {
1466 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1467 PIPEDIR_IN, /* in = DL = target -> host */
1468 1,
1469 },
1470 {
1471 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1472 PIPEDIR_OUT, /* out = UL = host -> target */
1473 4,
1474 },
1475 {
1476 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1477 PIPEDIR_IN, /* in = DL = target -> host */
1478 1,
1479 },
1480
1481 /* (Additions here) */
1482
1483 { /* Must be last */
1484 0,
1485 0,
1486 0,
1487 },
1488};
1489
1490/*
1491 * Send an interrupt to the device to wake up the Target CPU
1492 * so it has an opportunity to notice any changed state.
1493 */
1494static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1495{
1496 int ret;
1497 u32 core_ctrl;
1498
1499 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1500 CORE_CTRL_ADDRESS,
1501 &core_ctrl);
1502 if (ret) {
1503 ath10k_warn("Unable to read core ctrl\n");
1504 return ret;
1505 }
1506
1507 /* A_INUM_FIRMWARE interrupt to Target CPU */
1508 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1509
1510 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1511 CORE_CTRL_ADDRESS,
1512 core_ctrl);
1513 if (ret)
1514 ath10k_warn("Unable to set interrupt mask\n");
1515
1516 return ret;
1517}
1518
1519static int ath10k_pci_init_config(struct ath10k *ar)
1520{
1521 u32 interconnect_targ_addr;
1522 u32 pcie_state_targ_addr = 0;
1523 u32 pipe_cfg_targ_addr = 0;
1524 u32 svc_to_pipe_map = 0;
1525 u32 pcie_config_flags = 0;
1526 u32 ealloc_value;
1527 u32 ealloc_targ_addr;
1528 u32 flag2_value;
1529 u32 flag2_targ_addr;
1530 int ret = 0;
1531
1532 /* Download to Target the CE Config and the service-to-CE map */
1533 interconnect_targ_addr =
1534 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1535
1536 /* Supply Target-side CE configuration */
1537 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1538 &pcie_state_targ_addr);
1539 if (ret != 0) {
1540 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1541 return ret;
1542 }
1543
1544 if (pcie_state_targ_addr == 0) {
1545 ret = -EIO;
1546 ath10k_err("Invalid pcie state addr\n");
1547 return ret;
1548 }
1549
1550 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1551 offsetof(struct pcie_state,
1552 pipe_cfg_addr),
1553 &pipe_cfg_targ_addr);
1554 if (ret != 0) {
1555 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1556 return ret;
1557 }
1558
1559 if (pipe_cfg_targ_addr == 0) {
1560 ret = -EIO;
1561 ath10k_err("Invalid pipe cfg addr\n");
1562 return ret;
1563 }
1564
1565 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1566 target_ce_config_wlan,
1567 sizeof(target_ce_config_wlan));
1568
1569 if (ret != 0) {
1570 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1571 return ret;
1572 }
1573
1574 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1575 offsetof(struct pcie_state,
1576 svc_to_pipe_map),
1577 &svc_to_pipe_map);
1578 if (ret != 0) {
1579 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1580 return ret;
1581 }
1582
1583 if (svc_to_pipe_map == 0) {
1584 ret = -EIO;
1585 ath10k_err("Invalid svc_to_pipe map\n");
1586 return ret;
1587 }
1588
1589 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1590 target_service_to_ce_map_wlan,
1591 sizeof(target_service_to_ce_map_wlan));
1592 if (ret != 0) {
1593 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1594 return ret;
1595 }
1596
1597 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1598 offsetof(struct pcie_state,
1599 config_flags),
1600 &pcie_config_flags);
1601 if (ret != 0) {
1602 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1603 return ret;
1604 }
1605
1606 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1607
1608 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1609 offsetof(struct pcie_state, config_flags),
1610 &pcie_config_flags,
1611 sizeof(pcie_config_flags));
1612 if (ret != 0) {
1613 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1614 return ret;
1615 }
1616
1617 /* configure early allocation */
1618 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1619
1620 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1621 if (ret != 0) {
1622 ath10k_err("Failed to get early alloc val: %d\n", ret);
1623 return ret;
1624 }
1625
1626 /* first bank is switched to IRAM */
1627 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1628 HI_EARLY_ALLOC_MAGIC_MASK);
1629 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1630 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1631
1632 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1633 if (ret != 0) {
1634 ath10k_err("Failed to set early alloc val: %d\n", ret);
1635 return ret;
1636 }
1637
1638 /* Tell Target to proceed with initialization */
1639 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1640
1641 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1642 if (ret != 0) {
1643 ath10k_err("Failed to get option val: %d\n", ret);
1644 return ret;
1645 }
1646
1647 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1648
1649 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1650 if (ret != 0) {
1651 ath10k_err("Failed to set option val: %d\n", ret);
1652 return ret;
1653 }
1654
1655 return 0;
1656}
1657
1658
1659
1660static int ath10k_pci_ce_init(struct ath10k *ar)
1661{
1662 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1663 struct hif_ce_pipe_info *pipe_info;
1664 const struct ce_attr *attr;
1665 int pipe_num;
1666
1667 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1668 pipe_info = &ar_pci->pipe_info[pipe_num];
1669 pipe_info->pipe_num = pipe_num;
1670 pipe_info->hif_ce_state = ar;
1671 attr = &host_ce_config_wlan[pipe_num];
1672
1673 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1674 if (pipe_info->ce_hdl == NULL) {
1675 ath10k_err("Unable to initialize CE for pipe: %d\n",
1676 pipe_num);
1677
1678 /* It is safe to call it here. It checks if ce_hdl is
1679 * valid for each pipe */
1680 ath10k_pci_ce_deinit(ar);
1681 return -1;
1682 }
1683
1684 if (pipe_num == ar_pci->ce_count - 1) {
1685 /*
1686 * Reserve the ultimate CE for
1687 * diagnostic Window support
1688 */
1689 ar_pci->ce_diag =
1690 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1691 continue;
1692 }
1693
1694 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1695 }
1696
1697 /*
1698 * Initially, establish CE completion handlers for use with BMI.
1699 * These are overwritten with generic handlers after we exit BMI phase.
1700 */
1701 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1702 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1703 ath10k_pci_bmi_send_done, 0);
1704
1705 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1706 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1707 ath10k_pci_bmi_recv_data);
1708
1709 return 0;
1710}
1711
1712static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1713{
1714 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1715 u32 fw_indicator_address, fw_indicator;
1716
1717 ath10k_pci_wake(ar);
1718
1719 fw_indicator_address = ar_pci->fw_indicator_address;
1720 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1721
1722 if (fw_indicator & FW_IND_EVENT_PENDING) {
1723 /* ACK: clear Target-side pending event */
1724 ath10k_pci_write32(ar, fw_indicator_address,
1725 fw_indicator & ~FW_IND_EVENT_PENDING);
1726
1727 if (ar_pci->started) {
1728 ath10k_pci_hif_dump_area(ar);
1729 } else {
1730 /*
1731 * Probable Target failure before we're prepared
1732 * to handle it. Generally unexpected.
1733 */
1734 ath10k_warn("early firmware event indicated\n");
1735 }
1736 }
1737
1738 ath10k_pci_sleep(ar);
1739}
1740
1741static int ath10k_pci_hif_power_up(struct ath10k *ar)
1742{
1743 int ret;
1744
1745 /*
1746 * Bring the target up cleanly.
1747 *
1748 * The target may be in an undefined state with an AUX-powered Target
1749 * and a Host in WoW mode. If the Host crashes, loses power, or is
1750 * restarted (without unloading the driver) then the Target is left
1751 * (aux) powered and running. On a subsequent driver load, the Target
1752 * is in an unexpected state. We try to catch that here in order to
1753 * reset the Target and retry the probe.
1754 */
1755 ath10k_pci_device_reset(ar);
1756
1757 ret = ath10k_pci_reset_target(ar);
1758 if (ret)
1759 goto err;
1760
1761 if (ath10k_target_ps) {
1762 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
1763 } else {
1764 /* Force AWAKE forever */
1765 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
1766 ath10k_do_pci_wake(ar);
1767 }
1768
1769 ret = ath10k_pci_ce_init(ar);
1770 if (ret)
1771 goto err_ps;
1772
1773 ret = ath10k_pci_init_config(ar);
1774 if (ret)
1775 goto err_ce;
1776
1777 ret = ath10k_pci_wake_target_cpu(ar);
1778 if (ret) {
1779 ath10k_err("could not wake up target CPU (%d)\n", ret);
1780 goto err_ce;
1781 }
1782
1783 return 0;
1784
1785err_ce:
1786 ath10k_pci_ce_deinit(ar);
1787err_ps:
1788 if (!ath10k_target_ps)
1789 ath10k_do_pci_sleep(ar);
1790err:
1791 return ret;
1792}
1793
1794static void ath10k_pci_hif_power_down(struct ath10k *ar)
1795{
1796 ath10k_pci_ce_deinit(ar);
1797 if (!ath10k_target_ps)
1798 ath10k_do_pci_sleep(ar);
1799}
1800
1801#ifdef CONFIG_PM
1802
1803#define ATH10K_PCI_PM_CONTROL 0x44
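/*
 * Note (assumption, documented for clarity): 0x44 appears to be the
 * device's PCI power management control/status register; the low byte
 * written below selects the power state, presumably 0x3 for D3hot on
 * suspend and 0x0 for D0 on resume.
 */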
1804
1805static int ath10k_pci_hif_suspend(struct ath10k *ar)
1806{
1807 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1808 struct pci_dev *pdev = ar_pci->pdev;
1809 u32 val;
1810
1811 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1812
1813 if ((val & 0x000000ff) != 0x3) {
1814 pci_save_state(pdev);
1815 pci_disable_device(pdev);
1816 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1817 (val & 0xffffff00) | 0x03);
1818 }
1819
1820 return 0;
1821}
1822
1823static int ath10k_pci_hif_resume(struct ath10k *ar)
1824{
1825 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1826 struct pci_dev *pdev = ar_pci->pdev;
1827 u32 val;
1828
1829 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1830
1831 if ((val & 0x000000ff) != 0) {
1832 pci_restore_state(pdev);
1833 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1834 val & 0xffffff00);
1835 /*
1836 * Suspend/Resume resets the PCI configuration space,
1837 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1838 * to keep PCI Tx retries from interfering with C3 CPU state
1839 */
1840 pci_read_config_dword(pdev, 0x40, &val);
1841
1842 if ((val & 0x0000ff00) != 0)
1843 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1844 }
1845
1846 return 0;
1847}
1848#endif
1849
1850static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1851 .send_head = ath10k_pci_hif_send_head,
1852 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1853 .start = ath10k_pci_hif_start,
1854 .stop = ath10k_pci_hif_stop,
1855 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1856 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1857 .send_complete_check = ath10k_pci_hif_send_complete_check,
1858 .set_callbacks = ath10k_pci_hif_set_callbacks,
1859 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
1860 .power_up = ath10k_pci_hif_power_up,
1861 .power_down = ath10k_pci_hif_power_down,
1862#ifdef CONFIG_PM
1863 .suspend = ath10k_pci_hif_suspend,
1864 .resume = ath10k_pci_hif_resume,
1865#endif
1866};
1867
1868static void ath10k_pci_ce_tasklet(unsigned long ptr)
1869{
1870 struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1871 struct ath10k_pci *ar_pci = pipe->ar_pci;
1872
1873 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1874}
1875
1876static void ath10k_msi_err_tasklet(unsigned long data)
1877{
1878 struct ath10k *ar = (struct ath10k *)data;
1879
1880 ath10k_pci_fw_interrupt_handler(ar);
1881}
1882
1883/*
1884 * Handler for a per-engine interrupt on a PARTICULAR CE.
1885 * This is used in cases where each CE has a private MSI interrupt.
1886 */
1887static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1888{
1889 struct ath10k *ar = arg;
1890 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1891 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1892
1893 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1894 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1895 return IRQ_HANDLED;
1896 }
1897
1898 /*
1899 * NOTE: We are able to derive ce_id from irq because we
1900 * use a one-to-one mapping for CE's 0..5.
1901 * CE's 6 & 7 do not use interrupts at all.
1902 *
1903 * This mapping must be kept in sync with the mapping
1904 * used by firmware.
1905 */
1906 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1907 return IRQ_HANDLED;
1908}
1909
1910static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1911{
1912 struct ath10k *ar = arg;
1913 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1914
1915 tasklet_schedule(&ar_pci->msi_fw_err);
1916 return IRQ_HANDLED;
1917}
1918
1919/*
1920 * Top-level interrupt handler for all PCI interrupts from a Target.
1921 * When a block of MSI interrupts is allocated, this top-level handler
1922 * is not used; instead, we directly call the correct sub-handler.
1923 */
1924static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1925{
1926 struct ath10k *ar = arg;
1927 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1928
1929 if (ar_pci->num_msi_intrs == 0) {
1930 /*
1931 * IMPORTANT: INTR_CLR register has to be set after
1932 * INTR_ENABLE is set to 0, otherwise interrupt can not be
1933 * really cleared.
1934 */
1935 iowrite32(0, ar_pci->mem +
1936 (SOC_CORE_BASE_ADDRESS |
1937 PCIE_INTR_ENABLE_ADDRESS));
1938 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1939 PCIE_INTR_CE_MASK_ALL,
1940 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1941 PCIE_INTR_CLR_ADDRESS));
1942 /*
1943 * IMPORTANT: this extra read transaction is required to
1944 * flush the posted write buffer.
1945 */
1946 (void) ioread32(ar_pci->mem +
1947 (SOC_CORE_BASE_ADDRESS |
1948 PCIE_INTR_ENABLE_ADDRESS));
1949 }
1950
1951 tasklet_schedule(&ar_pci->intr_tq);
1952
1953 return IRQ_HANDLED;
1954}
1955
1956static void ath10k_pci_tasklet(unsigned long data)
1957{
1958 struct ath10k *ar = (struct ath10k *)data;
1959 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1960
1961 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1962 ath10k_ce_per_engine_service_any(ar);
1963
1964 if (ar_pci->num_msi_intrs == 0) {
1965 /* Enable Legacy PCI line interrupts */
1966 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1967 PCIE_INTR_CE_MASK_ALL,
1968 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1969 PCIE_INTR_ENABLE_ADDRESS));
1970 /*
1971 * IMPORTANT: this extra read transaction is required to
1972 * flush the posted write buffer
1973 */
1974 (void) ioread32(ar_pci->mem +
1975 (SOC_CORE_BASE_ADDRESS |
1976 PCIE_INTR_ENABLE_ADDRESS));
1977 }
1978}
1979
1980static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1981{
1982 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1983 int ret;
1984 int i;
1985
1986 ret = pci_enable_msi_block(ar_pci->pdev, num);
1987 if (ret)
1988 return ret;
1989
1990 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1991 ath10k_pci_msi_fw_handler,
1992 IRQF_SHARED, "ath10k_pci", ar);
1993 if (ret)
1994 return ret;
1995
1996 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1997 ret = request_irq(ar_pci->pdev->irq + i,
1998 ath10k_pci_per_engine_handler,
1999 IRQF_SHARED, "ath10k_pci", ar);
2000 if (ret) {
2001 ath10k_warn("request_irq(%d) failed %d\n",
2002 ar_pci->pdev->irq + i, ret);
2003
2004 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2005 free_irq(ar_pci->pdev->irq + i, ar);
2006
2007 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2008 pci_disable_msi(ar_pci->pdev);
2009 return ret;
2010 }
2011 }
2012
2013 ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2014 return 0;
2015}
2016
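/*
 * Request a single MSI vector shared by all copy engines and firmware
 * indications; every interrupt source is then serviced from the common
 * tasklet scheduled by ath10k_pci_interrupt_handler().
 */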
2017static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2018{
2019 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2020 int ret;
2021
2022 ret = pci_enable_msi(ar_pci->pdev);
2023 if (ret < 0)
2024 return ret;
2025
2026 ret = request_irq(ar_pci->pdev->irq,
2027 ath10k_pci_interrupt_handler,
2028 IRQF_SHARED, "ath10k_pci", ar);
2029 if (ret < 0) {
2030 pci_disable_msi(ar_pci->pdev);
2031 return ret;
2032 }
2033
2034 ath10k_info("MSI interrupt handling\n");
2035 return 0;
2036}
2037
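/*
 * Fall back to the legacy INTx line. The target has to be woken up
 * before the interrupt enable registers can be programmed reliably.
 */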
2038static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2039{
2040 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2041 int ret;
2042
2043 ret = request_irq(ar_pci->pdev->irq,
2044 ath10k_pci_interrupt_handler,
2045 IRQF_SHARED, "ath10k_pci", ar);
2046 if (ret < 0)
2047 return ret;
2048
2049 /*
2050 * Make sure to wake the Target before enabling Legacy
2051 * Interrupt.
2052 */
2053 iowrite32(PCIE_SOC_WAKE_V_MASK,
2054 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2055 PCIE_SOC_WAKE_ADDRESS);
2056
2057 ath10k_pci_wait(ar);
2058
2059 /*
2060	 * A potential race occurs here: the CORE_BASE write depends on
2061	 * the target correctly decoding the AXI address, but the host
2062	 * has no way of knowing when the target has written its BAR to
2063	 * CORE_CTRL. The write might get lost if the target has NOT yet
2064	 * written the BAR. For now, work around the race by repeating
2065	 * the write in the synchronization check below.
2066 */
2067 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2068 PCIE_INTR_CE_MASK_ALL,
2069 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2070 PCIE_INTR_ENABLE_ADDRESS));
2071 iowrite32(PCIE_SOC_WAKE_RESET,
2072 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2073 PCIE_SOC_WAKE_ADDRESS);
2074
2075 ath10k_info("legacy interrupt handling\n");
2076 return 0;
2077}
2078
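/*
 * Interrupt setup entry point: initialize the per-CE and firmware error
 * tasklets, then try MSI-X first (if supported by the hardware), falling
 * back to a single MSI and finally to legacy interrupts.
 */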
2079static int ath10k_pci_start_intr(struct ath10k *ar)
2080{
2081 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2082 int num = MSI_NUM_REQUEST;
2083 int ret;
2084 int i;
2085
2086 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2087 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2088 (unsigned long) ar);
2089
2090 for (i = 0; i < CE_COUNT; i++) {
2091 ar_pci->pipe_info[i].ar_pci = ar_pci;
2092 tasklet_init(&ar_pci->pipe_info[i].intr,
2093 ath10k_pci_ce_tasklet,
2094 (unsigned long)&ar_pci->pipe_info[i]);
2095 }
2096
2097 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2098 num = 1;
2099
2100 if (num > 1) {
2101 ret = ath10k_pci_start_intr_msix(ar, num);
2102 if (ret == 0)
2103 goto exit;
2104
2105 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2106 num = 1;
2107 }
2108
2109 if (num == 1) {
2110 ret = ath10k_pci_start_intr_msi(ar);
2111 if (ret == 0)
2112 goto exit;
2113
2114 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2115 ret);
2116 num = 0;
2117 }
2118
2119 ret = ath10k_pci_start_intr_legacy(ar);
2120
2121exit:
2122 ar_pci->num_msi_intrs = num;
2123 ar_pci->ce_count = CE_COUNT;
2124 return ret;
2125}
2126
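/* Free every requested IRQ and disable MSI if it was enabled. */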
2127static void ath10k_pci_stop_intr(struct ath10k *ar)
2128{
2129 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2130 int i;
2131
2132	/* There's at least one interrupt regardless of whether it's legacy INTR,
2133	 * MSI or MSI-X */
2134 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2135 free_irq(ar_pci->pdev->irq + i, ar);
2136
2137 if (ar_pci->num_msi_intrs > 0)
2138 pci_disable_msi(ar_pci->pdev);
2139}
2140
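/*
 * Wait up to 3 seconds (300 x 10 ms) for the firmware to set
 * FW_IND_INITIALIZED in the firmware indicator register. In legacy
 * interrupt mode the interrupt enable write is repeated to work around
 * the race described in ath10k_pci_start_intr_legacy().
 */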
2141static int ath10k_pci_reset_target(struct ath10k *ar)
2142{
2143 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2144 int wait_limit = 300; /* 3 sec */
2145
2146 /* Wait for Target to finish initialization before we proceed. */
2147 iowrite32(PCIE_SOC_WAKE_V_MASK,
2148 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2149 PCIE_SOC_WAKE_ADDRESS);
2150
2151 ath10k_pci_wait(ar);
2152
2153 while (wait_limit-- &&
2154 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2155 FW_IND_INITIALIZED)) {
2156 if (ar_pci->num_msi_intrs == 0)
2157 /* Fix potential race by repeating CORE_BASE writes */
2158 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2159 PCIE_INTR_CE_MASK_ALL,
2160 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2161 PCIE_INTR_ENABLE_ADDRESS));
2162 mdelay(10);
2163 }
2164
2165 if (wait_limit < 0) {
2166 ath10k_err("Target stalled\n");
2167 iowrite32(PCIE_SOC_WAKE_RESET,
2168 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2169 PCIE_SOC_WAKE_ADDRESS);
2170 return -EIO;
2171 }
2172
2173 iowrite32(PCIE_SOC_WAKE_RESET,
2174 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2175 PCIE_SOC_WAKE_ADDRESS);
2176
2177 return 0;
2178}
2179
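/*
 * Cold-reset the target: wake the SoC, assert SOC_GLOBAL_RESET, wait
 * for RTC_STATE to report a cold reset, then deassert the reset and
 * wait for the state to clear again.
 */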
Michal Kazior7a5fe3f2013-07-05 16:15:11 +03002180static void ath10k_pci_device_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002181{
Michal Kazior7a5fe3f2013-07-05 16:15:11 +03002182 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002183 void __iomem *mem = ar_pci->mem;
2184 int i;
2185 u32 val;
2186
2187 if (!SOC_GLOBAL_RESET_ADDRESS)
2188 return;
2189
2190 if (!mem)
2191 return;
2192
2193 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2194 PCIE_SOC_WAKE_V_MASK);
2195 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2196 if (ath10k_pci_target_is_awake(ar))
2197 break;
2198 msleep(1);
2199 }
2200
2201 /* Put Target, including PCIe, into RESET. */
2202 val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2203 val |= 1;
2204 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2205
2206 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2207 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2208 RTC_STATE_COLD_RESET_MASK)
2209 break;
2210 msleep(1);
2211 }
2212
2213 /* Pull Target, including PCIe, out of RESET. */
2214 val &= ~1;
2215 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2216
2217 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2218 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2219 RTC_STATE_COLD_RESET_MASK))
2220 break;
2221 msleep(1);
2222 }
2223
2224 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2225}
2226
2227static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2228{
2229 int i;
2230
2231 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2232 if (!test_bit(i, ar_pci->features))
2233 continue;
2234
2235 switch (i) {
2236 case ATH10K_PCI_FEATURE_MSI_X:
2237 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2238 break;
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002239 case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002240 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2241 break;
2242 }
2243 }
2244}
2245
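/*
 * PCI probe path: allocate the per-device state, detect the hardware
 * revision from the PCI device ID, map the BAR, restrict DMA to 32-bit
 * addresses, start interrupt handling and finally register with the
 * ath10k core. Errors unwind in reverse order.
 */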
2246static int ath10k_pci_probe(struct pci_dev *pdev,
2247 const struct pci_device_id *pci_dev)
2248{
2249 void __iomem *mem;
2250 int ret = 0;
2251 struct ath10k *ar;
2252 struct ath10k_pci *ar_pci;
2253 u32 lcr_val;
2254
2255 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2256
2257 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2258 if (ar_pci == NULL)
2259 return -ENOMEM;
2260
2261 ar_pci->pdev = pdev;
2262 ar_pci->dev = &pdev->dev;
2263
2264 switch (pci_dev->device) {
2265 case QCA988X_1_0_DEVICE_ID:
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002266 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002267 break;
2268 case QCA988X_2_0_DEVICE_ID:
2269 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2270 break;
2271 default:
2272 ret = -ENODEV;
2273		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2274 goto err_ar_pci;
2275 }
2276
2277 ath10k_pci_dump_features(ar_pci);
2278
Michal Kazior3a0861f2013-07-05 16:15:06 +03002279 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002280 if (!ar) {
2281 ath10k_err("ath10k_core_create failed!\n");
2282 ret = -EINVAL;
2283 goto err_ar_pci;
2284 }
2285
2286 /* Enable QCA988X_1.0 HW workarounds */
Michal Kaziorcba4ca72013-07-05 16:15:07 +03002287 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
Kalle Valo5e3dd152013-06-12 20:52:10 +03002288 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2289
2290 ar_pci->ar = ar;
2291 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2292 atomic_set(&ar_pci->keep_awake_count, 0);
2293
2294 pci_set_drvdata(pdev, ar);
2295
2296 /*
2297	 * Without the Host's knowledge, the Target may have been reset or
2298 * power cycled and its Config Space may no longer reflect the PCI
2299 * address space that was assigned earlier by the PCI infrastructure.
2300 * Refresh it now.
2301 */
2302 ret = pci_assign_resource(pdev, BAR_NUM);
2303 if (ret) {
2304 ath10k_err("cannot assign PCI space: %d\n", ret);
2305 goto err_ar;
2306 }
2307
2308 ret = pci_enable_device(pdev);
2309 if (ret) {
2310 ath10k_err("cannot enable PCI device: %d\n", ret);
2311 goto err_ar;
2312 }
2313
2314 /* Request MMIO resources */
2315 ret = pci_request_region(pdev, BAR_NUM, "ath");
2316 if (ret) {
2317 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2318 goto err_device;
2319 }
2320
2321 /*
2322	 * Target structures are limited to 32-bit DMA pointers.
2323	 * DMA pointers can be wider than 32 bits by default on some systems.
2324 */
2325 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2326 if (ret) {
2327 ath10k_err("32-bit DMA not available: %d\n", ret);
2328 goto err_region;
2329 }
2330
2331 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2332 if (ret) {
2333 ath10k_err("cannot enable 32-bit consistent DMA\n");
2334 goto err_region;
2335 }
2336
2337 /* Set bus master bit in PCI_COMMAND to enable DMA */
2338 pci_set_master(pdev);
2339
2340 /*
2341 * Temporary FIX: disable ASPM
2342 * Will be removed after the OTP is programmed
2343 */
2344 pci_read_config_dword(pdev, 0x80, &lcr_val);
2345 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2346
2347 /* Arrange for access to Target SoC registers. */
2348 mem = pci_iomap(pdev, BAR_NUM, 0);
2349 if (!mem) {
2350 ath10k_err("PCI iomap error\n");
2351 ret = -EIO;
2352 goto err_master;
2353 }
2354
2355 ar_pci->mem = mem;
2356
2357 spin_lock_init(&ar_pci->ce_lock);
2358
2359 ar_pci->cacheline_sz = dma_get_cache_alignment();
2360
2361 ret = ath10k_pci_start_intr(ar);
2362 if (ret) {
2363 ath10k_err("could not start interrupt handling (%d)\n", ret);
2364 goto err_iomap;
2365 }
2366
Kalle Valo5e3dd152013-06-12 20:52:10 +03002367 ret = ath10k_core_register(ar);
2368 if (ret) {
2369 ath10k_err("could not register driver core (%d)\n", ret);
Michal Kazior818bdd12013-07-16 09:38:57 +02002370 goto err_intr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002371 }
2372
2373 return 0;
2374
Kalle Valo5e3dd152013-06-12 20:52:10 +03002375err_intr:
2376 ath10k_pci_stop_intr(ar);
2377err_iomap:
2378 pci_iounmap(pdev, mem);
2379err_master:
2380 pci_clear_master(pdev);
2381err_region:
2382 pci_release_region(pdev, BAR_NUM);
2383err_device:
2384 pci_disable_device(pdev);
2385err_ar:
2386 pci_set_drvdata(pdev, NULL);
2387 ath10k_core_destroy(ar);
2388err_ar_pci:
2389 /* call HIF PCI free here */
2390 kfree(ar_pci);
2391
2392 return ret;
2393}
2394
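/* Tear down everything set up in ath10k_pci_probe() in reverse order. */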
2395static void ath10k_pci_remove(struct pci_dev *pdev)
2396{
2397 struct ath10k *ar = pci_get_drvdata(pdev);
2398 struct ath10k_pci *ar_pci;
2399
2400 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2401
2402 if (!ar)
2403 return;
2404
2405 ar_pci = ath10k_pci_priv(ar);
2406
2407 if (!ar_pci)
2408 return;
2409
2410 tasklet_kill(&ar_pci->msi_fw_err);
2411
2412 ath10k_core_unregister(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002413 ath10k_pci_stop_intr(ar);
2414
2415 pci_set_drvdata(pdev, NULL);
2416 pci_iounmap(pdev, ar_pci->mem);
2417 pci_release_region(pdev, BAR_NUM);
2418 pci_clear_master(pdev);
2419 pci_disable_device(pdev);
2420
2421 ath10k_core_destroy(ar);
2422 kfree(ar_pci);
2423}
2424
Kalle Valo5e3dd152013-06-12 20:52:10 +03002425MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2426
2427static struct pci_driver ath10k_pci_driver = {
2428 .name = "ath10k_pci",
2429 .id_table = ath10k_pci_id_table,
2430 .probe = ath10k_pci_probe,
2431 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002432};
2433
2434static int __init ath10k_pci_init(void)
2435{
2436 int ret;
2437
2438 ret = pci_register_driver(&ath10k_pci_driver);
2439 if (ret)
2440 ath10k_err("pci_register_driver failed [%d]\n", ret);
2441
2442 return ret;
2443}
2444module_init(ath10k_pci_init);
2445
2446static void __exit ath10k_pci_exit(void)
2447{
2448 pci_unregister_driver(&ath10k_pci_driver);
2449}
2450
2451module_exit(ath10k_pci_exit);
2452
2453MODULE_AUTHOR("Qualcomm Atheros");
2454MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2455MODULE_LICENSE("Dual BSD/GPL");
2456MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2457MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2458MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2459MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2460MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2461MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);