blob: 043f85fc6dbbb2452bdbdd9f1f7a704b4d81735f [file] [log] [blame]
Kalle Valobdcd8172011-07-18 00:22:30 +03001/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "target.h"
19#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h"
22
23#define MAILBOX_FOR_BLOCK_SIZE 1
24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26
Kalle Valobdcd8172011-07-18 00:22:30 +030027static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
28{
29 u8 *buf;
30 int i;
31
32 buf = req->virt_dma_buf;
33
34 for (i = 0; i < req->scat_entries; i++) {
35
36 if (from_dma)
37 memcpy(req->scat_list[i].buf, buf,
38 req->scat_list[i].len);
39 else
40 memcpy(buf, req->scat_list[i].buf,
41 req->scat_list[i].len);
42
43 buf += req->scat_list[i].len;
44 }
45
46 return 0;
47}
48
49int ath6kldev_rw_comp_handler(void *context, int status)
50{
51 struct htc_packet *packet = context;
52
53 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
54 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
55 packet, status);
56
57 packet->status = status;
58 packet->completion(packet->context, packet);
59
60 return 0;
61}
62
63static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
64{
65 u32 dummy;
66 int status;
67
68 ath6kl_err("target debug interrupt\n");
69
70 ath6kl_target_failure(dev->ar);
71
72 /*
73 * read counter to clear the interrupt, the debug error interrupt is
74 * counter 0.
75 */
76 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
77 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
78 if (status)
79 WARN_ON(1);
80
81 return status;
82}
83
/*
 * Mailbox recv message polling.
 *
 * Repeatedly read the interrupt processing register table until the HTC
 * mailbox reports data with a valid lookahead, or @timeout (in ms)
 * expires.  On success the lookahead word is returned through @lk_ahd.
 * Returns 0 on success, -ETIME on timeout, or a HIF I/O error.
 */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;	/* status bit for the HTC mailbox */

	/* poll in ATH6KL_TIME_QUANTUM slices; i == 0 on exit means timeout */
	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kldev_proc_dbg_intr(dev);
	}

	return status;
}
139
140/*
141 * Disable packet reception (used in case the host runs out of buffers)
142 * using the interrupt enable registers through the host I/F
143 */
144int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
145{
146 struct ath6kl_irq_enable_reg regs;
147 int status = 0;
148
149 /* take the lock to protect interrupt enable shadows */
150 spin_lock_bh(&dev->lock);
151
152 if (enable_rx)
153 dev->irq_en_reg.int_status_en |=
154 SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
155 else
156 dev->irq_en_reg.int_status_en &=
157 ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
158
159 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
160
161 spin_unlock_bh(&dev->lock);
162
163 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
164 &regs.int_status_en,
165 sizeof(struct ath6kl_irq_enable_reg),
166 HIF_WR_SYNC_BYTE_INC);
167
168 return status;
169}
170
/*
 * Submit a scatter request to the HIF layer.
 *
 * Reads are issued synchronously against the fixed mbox address; writes
 * are issued asynchronously and use the extended mbox address when the
 * transfer exceeds the mbox width.  Virtual (bounce-buffer) scatter
 * requests are staged through the DMA buffer around the transfer.
 * Returns the HIF status; on a write-side copy failure the request is
 * completed with an error and 0 is returned.
 */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		/* large writes go through the extended mbox address */
		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	if (!read && scat_req->virt_scat) {
		/* gather the scatter entries into the bounce buffer */
		status = ath6kldev_cp_scat_dma_buf(scat_req, false);
		if (status) {
			/* complete the request immediately on copy failure */
			scat_req->status = status;
			scat_req->complete(dev->ar->htc_target, scat_req);
			return 0;
		}
	}

	status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && scat_req->virt_scat)
			/* scatter the bounce buffer back out to the entries */
			scat_req->status =
				ath6kldev_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
215
Kalle Valobdcd8172011-07-18 00:22:30 +0300216static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
217{
218 u8 counter_int_status;
219
220 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
221
222 counter_int_status = dev->irq_proc_reg.counter_int_status &
223 dev->irq_en_reg.cntr_int_status_en;
224
225 ath6kl_dbg(ATH6KL_DBG_IRQ,
226 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
227 counter_int_status);
228
229 /*
230 * NOTE: other modules like GMBOX may use the counter interrupt for
231 * credit flow control on other counters, we only need to check for
232 * the debug assertion counter interrupt.
233 */
234 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
235 return ath6kldev_proc_dbg_intr(dev);
236
237 return 0;
238}
239
240static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
241{
242 int status;
243 u8 error_int_status;
244 u8 reg_buf[4];
245
246 ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
247
248 error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
249 if (!error_int_status) {
250 WARN_ON(1);
251 return -EIO;
252 }
253
254 ath6kl_dbg(ATH6KL_DBG_IRQ,
255 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
256 error_int_status);
257
258 if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
259 ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
260
261 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
262 ath6kl_err("rx underflow\n");
263
264 if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
265 ath6kl_err("tx overflow\n");
266
267 /* Clear the interrupt */
268 dev->irq_proc_reg.error_int_status &= ~error_int_status;
269
270 /* set W1C value to clear the interrupt, this hits the register first */
271 reg_buf[0] = error_int_status;
272 reg_buf[1] = 0;
273 reg_buf[2] = 0;
274 reg_buf[3] = 0;
275
276 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
277 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
278
279 if (status)
280 WARN_ON(1);
281
282 return status;
283}
284
285static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
286{
287 int status;
288 u8 cpu_int_status;
289 u8 reg_buf[4];
290
291 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
292
293 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
294 dev->irq_en_reg.cpu_int_status_en;
295 if (!cpu_int_status) {
296 WARN_ON(1);
297 return -EIO;
298 }
299
300 ath6kl_dbg(ATH6KL_DBG_IRQ,
301 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
302 cpu_int_status);
303
304 /* Clear the interrupt */
305 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
306
307 /*
308 * Set up the register transfer buffer to hit the register 4 times ,
309 * this is done to make the access 4-byte aligned to mitigate issues
310 * with host bus interconnects that restrict bus transfer lengths to
311 * be a multiple of 4-bytes.
312 */
313
314 /* set W1C value to clear the interrupt, this hits the register first */
315 reg_buf[0] = cpu_int_status;
316 /* the remaining are set to zero which have no-effect */
317 reg_buf[1] = 0;
318 reg_buf[2] = 0;
319 reg_buf[3] = 0;
320
321 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
322 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
323
324 if (status)
325 WARN_ON(1);
326
327 return status;
328}
329
/*
 * Process pending interrupts synchronously.
 *
 * Reads the interrupt status register table once, dispatches any
 * pending mailbox data to HTC via dev->msg_pending(), and routes
 * CPU/error/counter interrupts to their handlers.  *done is set true
 * when the caller should stop re-checking the status registers.
 * Returns 0 or the first error encountered.
 */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;	/* status bit for the HTC mailbox */

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Process pending intr only when int_status_en is clear, it may
	 * result in unnecessary bus transaction otherwise. Target may be
	 * unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *	    + sizeof(error_int_status) +
		 *	    sizeof(counter_int_status) +
		 *	    sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *	    + sizeof(hole) + sizeof(rx_lkahd) +
		 *	    sizeof(int_status_en) +
		 *	    sizeof(cpu_int_status_en) +
		 *	    sizeof(err_int_status_en) +
		 *	    sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out pending mbox value, we use "lookAhead"
			 * as the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending at all: the caller can stop looping */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kldev_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kldev_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kldev_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
478
479/* interrupt handler, kicks off all interrupt processing */
480int ath6kldev_intr_bh_handler(struct ath6kl *ar)
481{
482 struct ath6kl_device *dev = ar->htc_target->dev;
483 int status = 0;
484 bool done = false;
485
486 /*
487 * Reset counter used to flag a re-scan of IRQ status registers on
488 * the target.
489 */
490 dev->chk_irq_status_cnt = 0;
491
492 /*
493 * IRQ processing is synchronous, interrupt status registers can be
494 * re-read.
495 */
496 while (!done) {
497 status = proc_pending_irqs(dev, &done);
498 if (status)
499 break;
500 }
501
502 return status;
503}
504
505static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
506{
507 struct ath6kl_irq_enable_reg regs;
508 int status;
509
510 spin_lock_bh(&dev->lock);
511
512 /* Enable all but ATH6KL CPU interrupts */
513 dev->irq_en_reg.int_status_en =
514 SM(INT_STATUS_ENABLE_ERROR, 0x01) |
515 SM(INT_STATUS_ENABLE_CPU, 0x01) |
516 SM(INT_STATUS_ENABLE_COUNTER, 0x01);
517
518 /*
519 * NOTE: There are some cases where HIF can do detection of
520 * pending mbox messages which is disabled now.
521 */
522 dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
523
524 /* Set up the CPU Interrupt status Register */
525 dev->irq_en_reg.cpu_int_status_en = 0;
526
527 /* Set up the Error Interrupt status Register */
528 dev->irq_en_reg.err_int_status_en =
529 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
530 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
531
532 /*
533 * Enable Counter interrupt status register to get fatal errors for
534 * debugging.
535 */
536 dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
537 ATH6KL_TARGET_DEBUG_INTR_MASK);
538 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
539
540 spin_unlock_bh(&dev->lock);
541
542 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
543 &regs.int_status_en, sizeof(regs),
544 HIF_WR_SYNC_BYTE_INC);
545
546 if (status)
547 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
548 status);
549
550 return status;
551}
552
553int ath6kldev_disable_intrs(struct ath6kl_device *dev)
554{
555 struct ath6kl_irq_enable_reg regs;
556
557 spin_lock_bh(&dev->lock);
558 /* Disable all interrupts */
559 dev->irq_en_reg.int_status_en = 0;
560 dev->irq_en_reg.cpu_int_status_en = 0;
561 dev->irq_en_reg.err_int_status_en = 0;
562 dev->irq_en_reg.cntr_int_status_en = 0;
563 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
564 spin_unlock_bh(&dev->lock);
565
566 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
567 &regs.int_status_en, sizeof(regs),
568 HIF_WR_SYNC_BYTE_INC);
569}
570
571/* enable device interrupts */
572int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
573{
574 int status = 0;
575
576 /*
577 * Make sure interrupt are disabled before unmasking at the HIF
578 * layer. The rationale here is that between device insertion
579 * (where we clear the interrupts the first time) and when HTC
580 * is finally ready to handle interrupts, other software can perform
581 * target "soft" resets. The ATH6KL interrupt enables reset back to an
582 * "enabled" state when this happens.
583 */
584 ath6kldev_disable_intrs(dev);
585
586 /* unmask the host controller interrupts */
587 ath6kl_hif_irq_enable(dev->ar);
588 status = ath6kldev_enable_intrs(dev);
589
590 return status;
591}
592
/*
 * Disable all device interrupts.  Returns the status of
 * ath6kldev_disable_intrs().
 */
int ath6kldev_mask_intrs(struct ath6kl_device *dev)
{
	/*
	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
	 * taken while we zero out our shadow registers in
	 * ath6kldev_disable_intrs().
	 */
	ath6kl_hif_irq_disable(dev->ar);

	return ath6kldev_disable_intrs(dev);
}
605
606int ath6kldev_setup(struct ath6kl_device *dev)
607{
608 int status = 0;
Kalle Valobdcd8172011-07-18 00:22:30 +0300609
Kalle Valobdcd8172011-07-18 00:22:30 +0300610 spin_lock_init(&dev->lock);
611
Kalle Valobdcd8172011-07-18 00:22:30 +0300612 /*
613 * NOTE: we actually get the block size of a mailbox other than 0,
614 * for SDIO the block size on mailbox 0 is artificially set to 1.
615 * So we use the block size that is set for the other 3 mailboxes.
616 */
617 dev->block_sz = dev->ar->mbox_info.block_size;
618
619 /* must be a power of 2 */
620 if ((dev->block_sz & (dev->block_sz - 1)) != 0) {
621 WARN_ON(1);
622 goto fail_setup;
623 }
624
625 /* assemble mask, used for padding to a block */
626 dev->block_mask = dev->block_sz - 1;
627
628 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
629 dev->block_sz, dev->ar->mbox_info.htc_addr);
630
631 ath6kl_dbg(ATH6KL_DBG_TRC,
632 "hif interrupt processing is sync only\n");
633
634 status = ath6kldev_disable_intrs(dev);
635
636fail_setup:
637 return status;
638
639}