blob: 44bee90bc9a64d3438d1c1d233a6c01aea770be3 [file] [log] [blame]
/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16
17#include "core.h"
18#include "target.h"
19#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h"
22
23#define MAILBOX_FOR_BLOCK_SIZE 1
24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26
/* Return a register I/O packet to the device's free pool (dev->reg_io). */
static void ath6kl_add_io_pkt(struct ath6kl_device *dev,
			      struct htc_packet *packet)
{
	spin_lock_bh(&dev->lock);
	list_add_tail(&packet->list, &dev->reg_io);
	spin_unlock_bh(&dev->lock);
}
34
35static struct htc_packet *ath6kl_get_io_pkt(struct ath6kl_device *dev)
36{
37 struct htc_packet *packet = NULL;
38
39 spin_lock_bh(&dev->lock);
40 if (!list_empty(&dev->reg_io)) {
41 packet = list_first_entry(&dev->reg_io,
42 struct htc_packet, list);
43 list_del(&packet->list);
44 }
45 spin_unlock_bh(&dev->lock);
46
47 return packet;
48}
49
50static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
51{
52 u8 *buf;
53 int i;
54
55 buf = req->virt_dma_buf;
56
57 for (i = 0; i < req->scat_entries; i++) {
58
59 if (from_dma)
60 memcpy(req->scat_list[i].buf, buf,
61 req->scat_list[i].len);
62 else
63 memcpy(buf, req->scat_list[i].buf,
64 req->scat_list[i].len);
65
66 buf += req->scat_list[i].len;
67 }
68
69 return 0;
70}
71
72int ath6kldev_rw_comp_handler(void *context, int status)
73{
74 struct htc_packet *packet = context;
75
76 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
77 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
78 packet, status);
79
80 packet->status = status;
81 packet->completion(packet->context, packet);
82
83 return 0;
84}
85
86static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
87{
88 u32 dummy;
89 int status;
90
91 ath6kl_err("target debug interrupt\n");
92
93 ath6kl_target_failure(dev->ar);
94
95 /*
96 * read counter to clear the interrupt, the debug error interrupt is
97 * counter 0.
98 */
99 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
100 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
101 if (status)
102 WARN_ON(1);
103
104 return status;
105}
106
/* mailbox recv message polling */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	/*
	 * Busy-poll in ATH6KL_TIME_QUANTUM (ms) steps, up to 'timeout' ms
	 * total, until the HTC mailbox reports a message with a valid
	 * lookahead.  On success *lk_ahd receives the lookahead value.
	 */
	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	/* loop ran to exhaustion without finding a valid lookahead */
	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kldev_proc_dbg_intr(dev);
	}

	return status;
}
162
163/*
164 * Disable packet reception (used in case the host runs out of buffers)
165 * using the interrupt enable registers through the host I/F
166 */
167int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
168{
169 struct ath6kl_irq_enable_reg regs;
170 int status = 0;
171
172 /* take the lock to protect interrupt enable shadows */
173 spin_lock_bh(&dev->lock);
174
175 if (enable_rx)
176 dev->irq_en_reg.int_status_en |=
177 SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
178 else
179 dev->irq_en_reg.int_status_en &=
180 ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
181
182 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
183
184 spin_unlock_bh(&dev->lock);
185
186 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
187 &regs.int_status_en,
188 sizeof(struct ath6kl_irq_enable_reg),
189 HIF_WR_SYNC_BYTE_INC);
190
191 return status;
192}
193
194static void ath6kldev_rw_async_handler(struct htc_target *target,
195 struct htc_packet *packet)
196{
197 struct ath6kl_device *dev = target->dev;
198 struct hif_scatter_req *req = packet->pkt_cntxt;
199
200 req->status = packet->status;
201
202 ath6kl_add_io_pkt(dev, packet);
203
Vasanthakumar Thiagarajane041c7f2011-07-16 20:29:09 +0530204 req->complete(target, req);
Kalle Valobdcd8172011-07-18 00:22:30 +0300205}
206
/*
 * Emulate a scatter request through the contiguous bounce buffer
 * (req->virt_dma_buf) as a single HIF transfer.  Synchronous requests
 * return the HIF status directly; asynchronous ones complete later via
 * ath6kldev_rw_async_handler using a borrowed register I/O packet.
 */
static int ath6kldev_rw_scatter(struct ath6kl *ar, struct hif_scatter_req *req)
{
	struct ath6kl_device *dev = ar->htc_target->dev;
	struct htc_packet *packet = NULL;
	int status = 0;
	u32 request = req->req;
	u8 *virt_dma_buf;

	/* zero-length request: nothing to transfer */
	if (!req->len)
		return 0;

	if (request & HIF_ASYNCHRONOUS) {
		/* use an I/O packet to carry this request */
		packet = ath6kl_get_io_pkt(dev);
		if (!packet) {
			status = -ENOMEM;
			goto out;
		}

		packet->pkt_cntxt = req;
		packet->completion = ath6kldev_rw_async_handler;
		packet->context = ar->htc_target;
	}

	virt_dma_buf = req->virt_dma_buf;

	if (request & HIF_ASYNCHRONOUS)
		status = hif_write_async(dev->ar, req->addr, virt_dma_buf,
					 req->len, request, packet);
	else
		status = hif_read_write_sync(dev->ar, req->addr, virt_dma_buf,
					     req->len, request);

out:
	/*
	 * Async failures are delivered through the completion callback:
	 * return the carrier packet (if any) to the pool, report the error
	 * via req->complete(), and hand 0 back to the caller.  Synchronous
	 * failures propagate as the return value.
	 */
	if (status)
		if (request & HIF_ASYNCHRONOUS) {
			if (packet != NULL)
				ath6kl_add_io_pkt(dev, packet);
			req->status = status;
			req->complete(ar->htc_target, req);
			status = 0;
		}

	return status;
}
252
/*
 * Submit a scatter request over the HTC mailbox.  Reads are synchronous
 * fixed-address block transfers; writes are asynchronous and use the
 * extended mailbox address when the total length exceeds HIF_MBOX_WIDTH.
 * Virtual-scatter requests are staged through the bounce buffer: gathered
 * before a write, scattered back after a successful read.
 */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		/* long frames spill past the mailbox width and must use
		 * the extended address */
		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	/* gather entries into the bounce buffer before a virtual write */
	if (!read && scat_req->virt_scat)
		status = ath6kldev_cp_scat_dma_buf(scat_req, false);

	if (status) {
		if (!read) {
			/* write path is async: report via the completion
			 * callback and tell the caller everything is fine */
			scat_req->status = status;
			scat_req->complete(dev->ar->htc_target, scat_req);
			return 0;
		}
		return status;
	}

	/* virtual scatter is emulated over one bounce-buffer transfer;
	 * real scatter goes straight to the HIF scatter implementation */
	if (scat_req->virt_scat)
		status = ath6kldev_rw_scatter(dev->ar, scat_req);
	else
		status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && scat_req->virt_scat)
			scat_req->status =
				ath6kldev_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
303
/*
 * Enable HIF-level scatter/gather so multiple HTC messages can be bundled
 * into one transfer.  Note: max_msg_per_trans is currently unused here;
 * the HIF layer fills dev->hif_scat_info with its own limits.
 */
int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_trans)
{
	return ath6kl_hif_enable_scatter(dev->ar, &dev->hif_scat_info);
}
308
309static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
310{
311 u8 counter_int_status;
312
313 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
314
315 counter_int_status = dev->irq_proc_reg.counter_int_status &
316 dev->irq_en_reg.cntr_int_status_en;
317
318 ath6kl_dbg(ATH6KL_DBG_IRQ,
319 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
320 counter_int_status);
321
322 /*
323 * NOTE: other modules like GMBOX may use the counter interrupt for
324 * credit flow control on other counters, we only need to check for
325 * the debug assertion counter interrupt.
326 */
327 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
328 return ath6kldev_proc_dbg_intr(dev);
329
330 return 0;
331}
332
333static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
334{
335 int status;
336 u8 error_int_status;
337 u8 reg_buf[4];
338
339 ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
340
341 error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
342 if (!error_int_status) {
343 WARN_ON(1);
344 return -EIO;
345 }
346
347 ath6kl_dbg(ATH6KL_DBG_IRQ,
348 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
349 error_int_status);
350
351 if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
352 ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
353
354 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
355 ath6kl_err("rx underflow\n");
356
357 if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
358 ath6kl_err("tx overflow\n");
359
360 /* Clear the interrupt */
361 dev->irq_proc_reg.error_int_status &= ~error_int_status;
362
363 /* set W1C value to clear the interrupt, this hits the register first */
364 reg_buf[0] = error_int_status;
365 reg_buf[1] = 0;
366 reg_buf[2] = 0;
367 reg_buf[3] = 0;
368
369 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
370 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
371
372 if (status)
373 WARN_ON(1);
374
375 return status;
376}
377
378static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
379{
380 int status;
381 u8 cpu_int_status;
382 u8 reg_buf[4];
383
384 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
385
386 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
387 dev->irq_en_reg.cpu_int_status_en;
388 if (!cpu_int_status) {
389 WARN_ON(1);
390 return -EIO;
391 }
392
393 ath6kl_dbg(ATH6KL_DBG_IRQ,
394 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
395 cpu_int_status);
396
397 /* Clear the interrupt */
398 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
399
400 /*
401 * Set up the register transfer buffer to hit the register 4 times ,
402 * this is done to make the access 4-byte aligned to mitigate issues
403 * with host bus interconnects that restrict bus transfer lengths to
404 * be a multiple of 4-bytes.
405 */
406
407 /* set W1C value to clear the interrupt, this hits the register first */
408 reg_buf[0] = cpu_int_status;
409 /* the remaining are set to zero which have no-effect */
410 reg_buf[1] = 0;
411 reg_buf[2] = 0;
412 reg_buf[3] = 0;
413
414 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
415 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
416
417 if (status)
418 WARN_ON(1);
419
420 return status;
421}
422
/* process pending interrupts synchronously */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Process pending intr only when int_status_en is clear, it may
	 * result in unnecessary bus transaction otherwise. Target may be
	 * unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *	    + sizeof(error_int_status) +
		 *	    sizeof(counter_int_status) +
		 *	    sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *	    + sizeof(hole) + sizeof(rx_lkahd) +
		 *	    sizeof(int_status_en) +
		 *	    sizeof(cpu_int_status_en) +
		 *	    sizeof(err_int_status_en) +
		 *	    sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out pending mbox value, we use "lookAhead as
			 * the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending at all: tell the caller to stop re-checking */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kldev_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kldev_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kldev_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
571
572/* interrupt handler, kicks off all interrupt processing */
573int ath6kldev_intr_bh_handler(struct ath6kl *ar)
574{
575 struct ath6kl_device *dev = ar->htc_target->dev;
576 int status = 0;
577 bool done = false;
578
579 /*
580 * Reset counter used to flag a re-scan of IRQ status registers on
581 * the target.
582 */
583 dev->chk_irq_status_cnt = 0;
584
585 /*
586 * IRQ processing is synchronous, interrupt status registers can be
587 * re-read.
588 */
589 while (!done) {
590 status = proc_pending_irqs(dev, &done);
591 if (status)
592 break;
593 }
594
595 return status;
596}
597
598static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
599{
600 struct ath6kl_irq_enable_reg regs;
601 int status;
602
603 spin_lock_bh(&dev->lock);
604
605 /* Enable all but ATH6KL CPU interrupts */
606 dev->irq_en_reg.int_status_en =
607 SM(INT_STATUS_ENABLE_ERROR, 0x01) |
608 SM(INT_STATUS_ENABLE_CPU, 0x01) |
609 SM(INT_STATUS_ENABLE_COUNTER, 0x01);
610
611 /*
612 * NOTE: There are some cases where HIF can do detection of
613 * pending mbox messages which is disabled now.
614 */
615 dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
616
617 /* Set up the CPU Interrupt status Register */
618 dev->irq_en_reg.cpu_int_status_en = 0;
619
620 /* Set up the Error Interrupt status Register */
621 dev->irq_en_reg.err_int_status_en =
622 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
623 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
624
625 /*
626 * Enable Counter interrupt status register to get fatal errors for
627 * debugging.
628 */
629 dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
630 ATH6KL_TARGET_DEBUG_INTR_MASK);
631 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
632
633 spin_unlock_bh(&dev->lock);
634
635 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
636 &regs.int_status_en, sizeof(regs),
637 HIF_WR_SYNC_BYTE_INC);
638
639 if (status)
640 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
641 status);
642
643 return status;
644}
645
646int ath6kldev_disable_intrs(struct ath6kl_device *dev)
647{
648 struct ath6kl_irq_enable_reg regs;
649
650 spin_lock_bh(&dev->lock);
651 /* Disable all interrupts */
652 dev->irq_en_reg.int_status_en = 0;
653 dev->irq_en_reg.cpu_int_status_en = 0;
654 dev->irq_en_reg.err_int_status_en = 0;
655 dev->irq_en_reg.cntr_int_status_en = 0;
656 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
657 spin_unlock_bh(&dev->lock);
658
659 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
660 &regs.int_status_en, sizeof(regs),
661 HIF_WR_SYNC_BYTE_INC);
662}
663
664/* enable device interrupts */
665int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
666{
667 int status = 0;
668
669 /*
670 * Make sure interrupt are disabled before unmasking at the HIF
671 * layer. The rationale here is that between device insertion
672 * (where we clear the interrupts the first time) and when HTC
673 * is finally ready to handle interrupts, other software can perform
674 * target "soft" resets. The ATH6KL interrupt enables reset back to an
675 * "enabled" state when this happens.
676 */
677 ath6kldev_disable_intrs(dev);
678
679 /* unmask the host controller interrupts */
680 ath6kl_hif_irq_enable(dev->ar);
681 status = ath6kldev_enable_intrs(dev);
682
683 return status;
684}
685
/* disable all device interrupts */
int ath6kldev_mask_intrs(struct ath6kl_device *dev)
{
	/*
	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
	 * taken while we zero out our shadow registers in
	 * ath6kldev_disable_intrs().
	 */
	ath6kl_hif_irq_disable(dev->ar);

	/* then clear the enable shadow and the target-side registers */
	return ath6kldev_disable_intrs(dev);
}
698
699int ath6kldev_setup(struct ath6kl_device *dev)
700{
701 int status = 0;
702 int i;
703 struct htc_packet *packet;
704
705 /* initialize our free list of IO packets */
706 INIT_LIST_HEAD(&dev->reg_io);
707 spin_lock_init(&dev->lock);
708
709 /* carve up register I/O packets (these are for ASYNC register I/O ) */
710 for (i = 0; i < ATH6KL_MAX_REG_IO_BUFFERS; i++) {
711 packet = &dev->reg_io_buf[i].packet;
712 set_htc_rxpkt_info(packet, dev, dev->reg_io_buf[i].buf,
713 ATH6KL_REG_IO_BUFFER_SIZE, 0);
714 ath6kl_add_io_pkt(dev, packet);
715 }
716
717 /*
718 * NOTE: we actually get the block size of a mailbox other than 0,
719 * for SDIO the block size on mailbox 0 is artificially set to 1.
720 * So we use the block size that is set for the other 3 mailboxes.
721 */
722 dev->block_sz = dev->ar->mbox_info.block_size;
723
724 /* must be a power of 2 */
725 if ((dev->block_sz & (dev->block_sz - 1)) != 0) {
726 WARN_ON(1);
727 goto fail_setup;
728 }
729
730 /* assemble mask, used for padding to a block */
731 dev->block_mask = dev->block_sz - 1;
732
733 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
734 dev->block_sz, dev->ar->mbox_info.htc_addr);
735
736 ath6kl_dbg(ATH6KL_DBG_TRC,
737 "hif interrupt processing is sync only\n");
738
739 status = ath6kldev_disable_intrs(dev);
740
741fail_setup:
742 return status;
743
744}