/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "target.h"
#include "hif-ops.h"
#include "htc_hif.h"
#include "debug.h"

#define MAILBOX_FOR_BLOCK_SIZE 1

#define ATH6KL_TIME_QUANTUM 10 /* in ms */
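
/*
 * For illustration: ATH6KL_TIME_QUANTUM is the polling granularity used
 * by ath6kldev_poll_mboxmsg_rx() below. Assuming a caller passes
 * timeout = 100 (ms):
 *
 *	iterations = timeout / ATH6KL_TIME_QUANTUM   (100 / 10 = 10 passes)
 *	each pass ends with mdelay(ATH6KL_TIME_QUANTUM)
 *
 * so the worst-case poll time is roughly the requested timeout, rounded
 * down to a multiple of the quantum.
 */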

static void ath6kl_add_io_pkt(struct ath6kl_device *dev,
                              struct htc_packet *packet)
{
        spin_lock_bh(&dev->lock);
        list_add_tail(&packet->list, &dev->reg_io);
        spin_unlock_bh(&dev->lock);
}

static struct htc_packet *ath6kl_get_io_pkt(struct ath6kl_device *dev)
{
        struct htc_packet *packet = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->reg_io)) {
                packet = list_first_entry(&dev->reg_io,
                                          struct htc_packet, list);
                list_del(&packet->list);
        }
        spin_unlock_bh(&dev->lock);

        return packet;
}
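
/*
 * The two helpers above implement a spinlock-protected free list of
 * register I/O packets. A hypothetical caller would look like the
 * following sketch (see ath6kldev_rw_scatter() below for the real use):
 *
 *	struct htc_packet *packet = ath6kl_get_io_pkt(dev);
 *	if (!packet)
 *		return -ENOMEM;			(pool exhausted)
 *	... use packet to carry an async request ...
 *	ath6kl_add_io_pkt(dev, packet);		(return it to the pool)
 */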

static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
{
        u8 *buf;
        int i;

        buf = req->virt_dma_buf;

        for (i = 0; i < req->scat_entries; i++) {

                if (from_dma)
                        memcpy(req->scat_list[i].buf, buf,
                               req->scat_list[i].len);
                else
                        memcpy(buf, req->scat_list[i].buf,
                               req->scat_list[i].len);

                buf += req->scat_list[i].len;
        }

        return 0;
}
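
/*
 * The copy step of the virtual-scatter bounce buffer. A worked example
 * with two scatter entries of lengths 16 and 32 (illustrative numbers):
 *
 *	from_dma == false (gather, before a write):
 *		virt_dma_buf[0..15]  <- scat_list[0].buf
 *		virt_dma_buf[16..47] <- scat_list[1].buf
 *	from_dma == true (scatter, after a read):
 *		the same copies with source and destination swapped
 */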

int ath6kldev_rw_comp_handler(void *context, int status)
{
        struct htc_packet *packet = context;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "ath6kldev_rw_comp_handler (pkt:0x%p, status: %d)\n",
                   packet, status);

        packet->status = status;
        packet->completion(packet->context, packet);

        return 0;
}

static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
{
        u32 dummy;
        int status;

        ath6kl_err("target debug interrupt\n");

        ath6kl_target_failure(dev->ar);

        /*
         * Read the counter to clear the interrupt; the debug error
         * interrupt is counter 0.
         */
        status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
                                     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
        if (status)
                WARN_ON(1);

        return status;
}

/* mailbox recv message polling */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
                              int timeout)
{
        struct ath6kl_irq_proc_registers *rg;
        int status = 0, i;
        u8 htc_mbox = 1 << HTC_MAILBOX;

        for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
                /* this is the standard HIF way, load the reg table */
                status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
                                             (u8 *) &dev->irq_proc_reg,
                                             sizeof(dev->irq_proc_reg),
                                             HIF_RD_SYNC_BYTE_INC);

                if (status) {
                        ath6kl_err("failed to read reg table\n");
                        return status;
                }

                /* check for MBOX data and valid lookahead */
                if (dev->irq_proc_reg.host_int_status & htc_mbox) {
                        if (dev->irq_proc_reg.rx_lkahd_valid &
                            htc_mbox) {
                                /*
                                 * Mailbox has a message and the lookahead
                                 * is valid.
                                 */
                                rg = &dev->irq_proc_reg;
                                *lk_ahd =
                                        le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
                                break;
                        }
                }

                /* delay a little */
                mdelay(ATH6KL_TIME_QUANTUM);
                ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
        }

        if (i == 0) {
                ath6kl_err("timeout waiting for recv message\n");
                status = -ETIME;
                /* check if the target asserted */
                if (dev->irq_proc_reg.counter_int_status &
                    ATH6KL_TARGET_DEBUG_INTR_MASK)
                        /*
                         * Target failure handler will be called in case of
                         * an assert.
                         */
                        ath6kldev_proc_dbg_intr(dev);
        }

        return status;
}

/*
 * Enable or disable packet reception (reception is disabled when the host
 * runs out of buffers) using the interrupt enable registers through the
 * host I/F.
 */
int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
        struct ath6kl_irq_enable_reg regs;
        int status = 0;

        /* take the lock to protect interrupt enable shadows */
        spin_lock_bh(&dev->lock);

        if (enable_rx)
                dev->irq_en_reg.int_status_en |=
                        SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
        else
                dev->irq_en_reg.int_status_en &=
                        ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);

        memcpy(&regs, &dev->irq_en_reg, sizeof(regs));

        spin_unlock_bh(&dev->lock);

        status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
                                     &regs.int_status_en,
                                     sizeof(struct ath6kl_irq_enable_reg),
                                     HIF_WR_SYNC_BYTE_INC);

        return status;
}
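
/*
 * SM()/MS() above are the driver's set-mask/mask-shift helpers for
 * register bit fields. Their exact definition lives in the driver
 * headers; the intent is roughly (an assumption, for illustration only):
 *
 *	SM(f, v): shift value into field f, i.e. ((v) << f_LSB) & f_MASK
 *	MS(f, v): extract field f from v,   i.e. ((v) & f_MASK) >> f_LSB
 *
 * Note also the locking pattern used above and in ath6kldev_enable_intrs():
 * the shadow register is updated under the spinlock, a snapshot is copied
 * to a local struct, and the bus write (which may sleep) is issued only
 * after the lock is dropped.
 */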

static void ath6kldev_rw_async_handler(struct htc_target *target,
                                       struct htc_packet *packet)
{
        struct ath6kl_device *dev = target->dev;
        struct hif_scatter_req *req = packet->pkt_cntxt;

        req->status = packet->status;

        ath6kl_add_io_pkt(dev, packet);

        req->complete(req);
}

static int ath6kldev_rw_scatter(struct ath6kl *ar, struct hif_scatter_req *req)
{
        struct ath6kl_device *dev = ar->htc_target->dev;
        struct htc_packet *packet = NULL;
        int status = 0;
        u32 request = req->req;
        u8 *virt_dma_buf;

        if (!req->len)
                return 0;

        if (request & HIF_ASYNCHRONOUS) {
                /* use an I/O packet to carry this request */
                packet = ath6kl_get_io_pkt(dev);
                if (!packet) {
                        status = -ENOMEM;
                        goto out;
                }

                packet->pkt_cntxt = req;
                packet->completion = ath6kldev_rw_async_handler;
                packet->context = ar->htc_target;
        }

        virt_dma_buf = req->virt_dma_buf;

        if (request & HIF_ASYNCHRONOUS)
                status = hif_write_async(dev->ar, req->addr, virt_dma_buf,
                                         req->len, request, packet);
        else
                status = hif_read_write_sync(dev->ar, req->addr, virt_dma_buf,
                                             req->len, request);

out:
        if (status && (request & HIF_ASYNCHRONOUS)) {
                if (packet != NULL)
                        ath6kl_add_io_pkt(dev, packet);
                req->status = status;
                req->complete(req);
                status = 0;
        }

        return status;
}

int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
                              struct hif_scatter_req *scat_req, bool read)
{
        int status = 0;

        if (read) {
                scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
                scat_req->addr = dev->ar->mbox_info.htc_addr;
        } else {
                scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

                scat_req->addr =
                        (scat_req->len > HIF_MBOX_WIDTH) ?
                        dev->ar->mbox_info.htc_ext_addr :
                        dev->ar->mbox_info.htc_addr;
        }

        ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
                   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
                   scat_req->scat_entries, scat_req->len,
                   scat_req->addr, !read ? "async" : "sync",
                   (read) ? "rd" : "wr");

        if (!read && dev->virt_scat)
                status = ath6kldev_cp_scat_dma_buf(scat_req, false);

        if (status) {
                if (!read) {
                        scat_req->status = status;
                        scat_req->complete(scat_req);
                        return 0;
                }
                return status;
        }

        if (dev->virt_scat)
                status = ath6kldev_rw_scatter(dev->ar, scat_req);
        else
                status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

        if (read) {
                /* in sync mode, we can touch the scatter request */
                scat_req->status = status;
                if (!status && dev->virt_scat)
                        scat_req->status =
                                ath6kldev_cp_scat_dma_buf(scat_req, true);
        }

        return status;
}
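
/*
 * Data flow of a virtual-scatter (dev->virt_scat) request through
 * ath6kldev_submit_scat_req(), as implemented above:
 *
 *	write:	cp_scat_dma_buf(req, false)	gather into the bounce buffer
 *		ath6kldev_rw_scatter()		one linear async transfer
 *	read:	ath6kldev_rw_scatter()		one linear sync transfer
 *		cp_scat_dma_buf(req, true)	scatter back to the entries
 *
 * Only the synchronous read path may touch the request after the I/O has
 * been issued; the async write path completes through req->complete().
 */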

/*
 * Function to set up virtual scatter support if the HIF
 * layer has not implemented the interface.
 */
static int ath6kldev_setup_virt_scat_sup(struct ath6kl_device *dev)
{
        struct hif_scatter_req *scat_req;
        int buf_sz, scat_req_sz, scat_list_sz;
        int i, status = 0;
        u8 *virt_dma_buf;

        buf_sz = 2 * L1_CACHE_BYTES + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

        scat_list_sz = (ATH6KL_SCATTER_ENTRIES_PER_REQ - 1) *
                       sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*scat_req) + scat_list_sz;

        for (i = 0; i < ATH6KL_SCATTER_REQS; i++) {
                scat_req = kzalloc(scat_req_sz, GFP_KERNEL);

                if (!scat_req) {
                        status = -ENOMEM;
                        break;
                }

                virt_dma_buf = kzalloc(buf_sz, GFP_KERNEL);
                if (!virt_dma_buf) {
                        kfree(scat_req);
                        status = -ENOMEM;
                        break;
                }

                scat_req->virt_dma_buf =
                        (u8 *)L1_CACHE_ALIGN((unsigned long)virt_dma_buf);
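                /*
                 * L1_CACHE_ALIGN() rounds the pointer up to the next
                 * cache-line boundary, consuming at most L1_CACHE_BYTES - 1
                 * bytes of the allocation; buf_sz above reserves two full
                 * cache lines of slack, so the aligned buffer still holds
                 * ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER usable bytes.
                 * For illustration, with 32-byte lines a pointer of 0x1004
                 * aligns up to 0x1020.
                 */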

                /* we emulate a DMA bounce interface */
                hif_scatter_req_add(dev->ar, scat_req);
        }

        if (status) {
                ath6kl_hif_cleanup_scatter(dev->ar);
        } else {
                dev->hif_scat_info.max_scat_entries =
                        ATH6KL_SCATTER_ENTRIES_PER_REQ;
                dev->hif_scat_info.max_xfer_szper_scatreq =
                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
                dev->virt_scat = true;
        }

        return status;
}

int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_trans)
{
        int status;

        status = ath6kl_hif_enable_scatter(dev->ar, &dev->hif_scat_info);

        if (status) {
                ath6kl_warn("hif does not support scatter requests (%d)\n",
                            status);

                /* we can try to use a virtual DMA scatter mechanism */
                status = ath6kldev_setup_virt_scat_sup(dev);
        }

        if (!status)
                ath6kl_dbg(ATH6KL_DBG_ANY, "max scatter items:%d: maxlen:%d\n",
                           dev->hif_scat_info.max_scat_entries,
                           dev->hif_scat_info.max_xfer_szper_scatreq);

        return status;
}

static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
{
        u8 counter_int_status;

        ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");

        counter_int_status = dev->irq_proc_reg.counter_int_status &
                             dev->irq_en_reg.cntr_int_status_en;

        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
                   counter_int_status);

        /*
         * NOTE: other modules like GMBOX may use the counter interrupt
         * for credit flow control on other counters; we only need to
         * check for the debug assertion counter interrupt.
         */
        if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
                return ath6kldev_proc_dbg_intr(dev);

        return 0;
}

static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
{
        int status;
        u8 error_int_status;
        u8 reg_buf[4];

        ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");

        error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
        if (!error_int_status) {
                WARN_ON(1);
                return -EIO;
        }

        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
                   error_int_status);

        if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
                ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");

        if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
                ath6kl_err("rx underflow\n");

        if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
                ath6kl_err("tx overflow\n");

        /* Clear the interrupt */
        dev->irq_proc_reg.error_int_status &= ~error_int_status;

        /* set W1C value to clear the interrupt, this hits the register first */
        reg_buf[0] = error_int_status;
        reg_buf[1] = 0;
        reg_buf[2] = 0;
        reg_buf[3] = 0;
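        /*
         * W1C means "write one to clear": writing back the value just read
         * clears exactly the bits that were set (e.g. the rx-underflow and
         * tx-overflow bits) and leaves all other bits untouched; the three
         * zero bytes are benign filler that pads the transfer to 4 bytes.
         */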

        status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
                                     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);

        if (status)
                WARN_ON(1);

        return status;
}

static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
{
        int status;
        u8 cpu_int_status;
        u8 reg_buf[4];

        ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");

        cpu_int_status = dev->irq_proc_reg.cpu_int_status &
                         dev->irq_en_reg.cpu_int_status_en;
        if (!cpu_int_status) {
                WARN_ON(1);
                return -EIO;
        }

        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
                   cpu_int_status);

        /* Clear the interrupt */
        dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;

        /*
         * Set up the register transfer buffer to hit the register 4 times;
         * this is done to make the access 4-byte aligned, to mitigate
         * issues with host bus interconnects that restrict bus transfer
         * lengths to be a multiple of 4 bytes.
         */

        /* set W1C value to clear the interrupt, this hits the register first */
        reg_buf[0] = cpu_int_status;
        /* the remaining bytes are set to zero and have no effect */
        reg_buf[1] = 0;
        reg_buf[2] = 0;
        reg_buf[3] = 0;

        status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
                                     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);

        if (status)
                WARN_ON(1);

        return status;
}

/* process pending interrupts synchronously */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
        struct ath6kl_irq_proc_registers *rg;
        int status = 0;
        u8 host_int_status = 0;
        u32 lk_ahd = 0;
        u8 htc_mbox = 1 << HTC_MAILBOX;

        ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

        /*
         * NOTE: HIF implementation guarantees that the context of this
         * call allows us to perform SYNCHRONOUS I/O, that is we can block,
         * sleep or call any API that can block or switch thread/task
         * contexts. This is a fully schedulable context.
         */

        /*
         * Process pending interrupts only when the interrupt status
         * enable shadow is non-zero; reading the status registers
         * otherwise results in unnecessary bus transactions, and the
         * target may be unresponsive at the time.
         */
        if (dev->irq_en_reg.int_status_en) {
                /*
                 * Read the first 28 bytes of the HTC register table. This
                 * will yield us the value of different int status
                 * registers and the lookahead registers.
                 *
                 * length = sizeof(int_status) + sizeof(cpu_int_status)
                 *          + sizeof(error_int_status) +
                 *          sizeof(counter_int_status) +
                 *          sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
                 *          + sizeof(hole) + sizeof(rx_lkahd) +
                 *          sizeof(int_status_en) +
                 *          sizeof(cpu_int_status_en) +
                 *          sizeof(err_int_status_en) +
                 *          sizeof(cntr_int_status_en);
                 */
                status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
                                             (u8 *) &dev->irq_proc_reg,
                                             sizeof(dev->irq_proc_reg),
                                             HIF_RD_SYNC_BYTE_INC);
                if (status)
                        goto out;

                if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
                        ath6kl_dump_registers(dev, &dev->irq_proc_reg,
                                              &dev->irq_en_reg);

                /* Update only those registers that are enabled */
                host_int_status = dev->irq_proc_reg.host_int_status &
                                  dev->irq_en_reg.int_status_en;

                /* Look at mbox status */
                if (host_int_status & htc_mbox) {
                        /*
                         * Mask out the pending mbox value; we use the
                         * "lookahead" as the real flag for mbox processing.
                         */
                        host_int_status &= ~htc_mbox;
                        if (dev->irq_proc_reg.rx_lkahd_valid &
                            htc_mbox) {
                                rg = &dev->irq_proc_reg;
                                lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
                                if (!lk_ahd)
                                        ath6kl_err("lookahead is zero!\n");
                        }
                }
        }

        if (!host_int_status && !lk_ahd) {
                *done = true;
                goto out;
        }

        if (lk_ahd) {
                int fetched = 0;

                ath6kl_dbg(ATH6KL_DBG_IRQ,
                           "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
                /*
                 * Mailbox Interrupt: the HTC layer may issue async
                 * requests to empty the mailbox. When emptying the recv
                 * mailbox we use the async handler above, called from the
                 * completion routine of the caller's read request. This can
                 * improve performance by reducing context switching when
                 * we rapidly pull packets.
                 */
                status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
                if (status)
                        goto out;

                if (!fetched)
                        /*
                         * HTC could not pull any messages out due to lack
                         * of resources.
                         */
                        dev->chk_irq_status_cnt = 0;
        }

        /* now handle the rest of them */
        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "valid interrupt source(s) for other interrupts: 0x%x\n",
                   host_int_status);

        if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
                /* CPU Interrupt */
                status = ath6kldev_proc_cpu_intr(dev);
                if (status)
                        goto out;
        }

        if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
                /* Error Interrupt */
                status = ath6kldev_proc_err_intr(dev);
                if (status)
                        goto out;
        }

        if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
                /* Counter Interrupt */
                status = ath6kldev_proc_counter_intr(dev);

out:
        /*
         * An optimization to bypass reading the IRQ status registers
         * unnecessarily, which can re-wake the target: if upper layers
         * determine that we are in a low-throughput mode, we can rely on
         * taking another interrupt rather than re-checking the status
         * registers.
         *
         * NOTE: host interfaces that detect pending mbox messages at the
         * HIF layer cannot use this optimization due to possible side
         * effects; for example, SPI requires the host to drain all
         * messages from the mailbox before exiting the ISR routine.
         */

        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "bypassing irq status re-check, forcing done\n");

        *done = true;

        ath6kl_dbg(ATH6KL_DBG_IRQ,
                   "proc_pending_irqs: (done:%d, status=%d)\n", *done, status);

        return status;
}

/* interrupt handler, kicks off all interrupt processing */
int ath6kldev_intr_bh_handler(struct ath6kl *ar)
{
        struct ath6kl_device *dev = ar->htc_target->dev;
        int status = 0;
        bool done = false;

        /*
         * Reset the counter used to flag a re-scan of IRQ status
         * registers on the target.
         */
        dev->chk_irq_status_cnt = 0;

        /*
         * IRQ processing is synchronous, interrupt status registers can be
         * re-read.
         */
        while (!done) {
                status = proc_pending_irqs(dev, &done);
                if (status)
                        break;
        }

        return status;
}

static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
{
        struct ath6kl_irq_enable_reg regs;
        int status;

        spin_lock_bh(&dev->lock);

        /* Enable all but ATH6KL CPU interrupts */
        dev->irq_en_reg.int_status_en =
                SM(INT_STATUS_ENABLE_ERROR, 0x01) |
                SM(INT_STATUS_ENABLE_CPU, 0x01) |
                SM(INT_STATUS_ENABLE_COUNTER, 0x01);

        /*
         * NOTE: There are some cases where HIF can do detection of
         * pending mbox messages which is disabled now.
         */
        dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);

        /* Set up the CPU Interrupt Status Register */
        dev->irq_en_reg.cpu_int_status_en = 0;

        /* Set up the Error Interrupt Status Register */
        dev->irq_en_reg.err_int_status_en =
                SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
                SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x01);

        /*
         * Enable the Counter Interrupt Status Register to get fatal errors
         * for debugging.
         */
        dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
                                                ATH6KL_TARGET_DEBUG_INTR_MASK);
        memcpy(&regs, &dev->irq_en_reg, sizeof(regs));

        spin_unlock_bh(&dev->lock);

        status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
                                     &regs.int_status_en, sizeof(regs),
                                     HIF_WR_SYNC_BYTE_INC);

        if (status)
                ath6kl_err("failed to update interrupt ctl reg err: %d\n",
                           status);

        return status;
}

int ath6kldev_disable_intrs(struct ath6kl_device *dev)
{
        struct ath6kl_irq_enable_reg regs;

        spin_lock_bh(&dev->lock);
        /* Disable all interrupts */
        dev->irq_en_reg.int_status_en = 0;
        dev->irq_en_reg.cpu_int_status_en = 0;
        dev->irq_en_reg.err_int_status_en = 0;
        dev->irq_en_reg.cntr_int_status_en = 0;
        memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
        spin_unlock_bh(&dev->lock);

        return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
                                   &regs.int_status_en, sizeof(regs),
                                   HIF_WR_SYNC_BYTE_INC);
}

/* enable device interrupts */
int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
{
        int status = 0;

        /*
         * Make sure interrupts are disabled before unmasking at the HIF
         * layer. The rationale here is that between device insertion
         * (where we clear the interrupts the first time) and when HTC
         * is finally ready to handle interrupts, other software can perform
         * target "soft" resets. The ATH6KL interrupt enables reset back to an
         * "enabled" state when this happens.
         */
        ath6kldev_disable_intrs(dev);

        /* unmask the host controller interrupts */
        ath6kl_hif_irq_enable(dev->ar);
        status = ath6kldev_enable_intrs(dev);

        return status;
}

/* disable all device interrupts */
int ath6kldev_mask_intrs(struct ath6kl_device *dev)
{
        /*
         * Mask the interrupt at the HIF layer to avoid any stray interrupt
         * taken while we zero out our shadow registers in
         * ath6kldev_disable_intrs().
         */
        ath6kl_hif_irq_disable(dev->ar);

        return ath6kldev_disable_intrs(dev);
}

int ath6kldev_setup(struct ath6kl_device *dev)
{
        int status = 0;
        int i;
        struct htc_packet *packet;

        /* initialize our free list of IO packets */
        INIT_LIST_HEAD(&dev->reg_io);
        spin_lock_init(&dev->lock);

        /* carve up register I/O packets (these are for ASYNC register I/O) */
        for (i = 0; i < ATH6KL_MAX_REG_IO_BUFFERS; i++) {
                packet = &dev->reg_io_buf[i].packet;
                set_htc_rxpkt_info(packet, dev, dev->reg_io_buf[i].buf,
                                   ATH6KL_REG_IO_BUFFER_SIZE, 0);
                ath6kl_add_io_pkt(dev, packet);
        }

        /*
         * NOTE: we actually get the block size of a mailbox other than 0;
         * for SDIO the block size on mailbox 0 is artificially set to 1.
         * So we use the block size that is set for the other 3 mailboxes.
         */
        dev->block_sz = dev->ar->mbox_info.block_size;

        /* must be a power of 2 */
        if ((dev->block_sz & (dev->block_sz - 1)) != 0) {
                WARN_ON(1);
                status = -EINVAL;
                goto fail_setup;
        }

        /* assemble mask, used for padding to a block */
        dev->block_mask = dev->block_sz - 1;
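        /*
         * The power-of-2 check and the mask work together. A worked
         * example with block_sz = 128:
         *
         *	128 & 127 == 0, so the check above passes
         *	block_mask = 127 (0x7f)
         *	a typical padding step elsewhere in the driver would be
         *	padded_len = (len + block_mask) & ~block_mask, so 100 -> 128
         *
         * A non-power-of-2 such as 96 fails: 96 & 95 == 64, non-zero.
         */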

        ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
                   dev->block_sz, dev->ar->mbox_info.htc_addr);

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "hif interrupt processing is sync only\n");

        status = ath6kldev_disable_intrs(dev);

fail_setup:
        return status;
}