blob: 3fe634f588c2dd7cbe723a1f1d1b775d2e143d41 [file] [log] [blame]
Nick Kossifidisc6e387a2008-08-29 22:45:39 +03001/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* DMA and interrupt masking functions *
21\*************************************/
22
23/*
24 * dma.c - DMA and interrupt masking functions
25 *
26 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
 * Also we setup interrupt mask register (IMR) and read the various interrupt
 * status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that resulted in the interrupt.
33 */
34
35#include "ath5k.h"
36#include "reg.h"
37#include "debug.h"
38#include "base.h"
39
Nick Kossifidis9320b5c2010-11-23 20:36:45 +020040
Nick Kossifidisc6e387a2008-08-29 22:45:39 +030041/*********\
42* Receive *
43\*********/
44
/**
 * ath5k_hw_start_rx_dma - Start DMA receive
 *
 * @ah: The &struct ath5k_hw
 *
 * Enable the receive DMA unit by writing the RXE bit to the
 * control register. The read-back of AR5K_CR discards its result;
 * presumably it is there to flush the posted write to the device
 * before returning — confirm against the bus access rules.
 */
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);
}
55
56/**
57 * ath5k_hw_stop_rx_dma - Stop DMA receive
58 *
59 * @ah: The &struct ath5k_hw
60 */
61int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
62{
63 unsigned int i;
64
Nick Kossifidisc6e387a2008-08-29 22:45:39 +030065 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
66
67 /*
68 * It may take some time to disable the DMA receive unit
69 */
Nick Kossifidis509a1062008-09-29 01:23:07 +030070 for (i = 1000; i > 0 &&
Nick Kossifidisc6e387a2008-08-29 22:45:39 +030071 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
72 i--)
Nick Kossifidisb3a28e62010-11-23 20:47:31 +020073 udelay(100);
74
75 if (i)
76 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
77 "failed to stop RX DMA !\n");
Nick Kossifidisc6e387a2008-08-29 22:45:39 +030078
79 return i ? 0 : -EBUSY;
80}
81
/**
 * ath5k_hw_get_rxdp - Get RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 *
 * Returns the current contents of the RX descriptor pointer
 * register (AR5K_RXDP).
 */
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
91
/**
 * ath5k_hw_set_rxdp - Set RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Refuses to update RXDP while the receive engine is running
 * (AR5K_CR_RXE set in the control register) — presumably because
 * changing the descriptor pointer under an active DMA engine would
 * corrupt the rx descriptor chain.
 *
 * Returns 0 on success, -EIO if rx is active.
 */
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
				"tried to set RXDP while rx was active !\n");
		return -EIO;
	}

	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
	return 0;
}
111
112
113/**********\
114* Transmit *
115\**********/
116
/**
 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and since 5210 doesn't have
 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c.
 *
 * Returns -EINVAL if the queue number is out of range, the queue is
 * inactive, or (on 5210) the queue type is unknown; -EIO if the queue
 * is disabled on 5211+ (TXD bit set on the QCU).
 *
 * NOTE: Must be called after setting up tx control descriptor for that
 * queue (see below).
 */
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210:
		 * TXE0/TXD0 control the data queue, TXE1/TXD1 the
		 * beacon/CAB queue; beacon setup also goes through BSR.
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			/* Mark timer-queue 1 valid and enable beacon DMA */
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
					AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue (read-back flushes the posted write) */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
179
/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if queue number is out of range or inactive.
 *
 * On 5211+ this goes through the QCU: schedule a TX disable, wait for
 * the queue to stop, then poll the pending-frame counter; on 2413+ a
 * stuck queue is additionally flushed via the PCU QUIET mechanism.
 */
int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	/* NOTE(review): this initializer is dead — i is reassigned
	 * before every use below */
	unsigned int i = 40;
	u32 tx_queue, pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* XXX Fix me...
			 * NOTE(review): AR5K_CR_TXD1 & ~AR5K_CR_TXD1 is
			 * always 0, so no disable bit is actually OR'ed in
			 * here; by symmetry with the data-queue case above,
			 * AR5K_CR_TXD1 & ~AR5K_CR_TXE1 was probably
			 * intended — needs verification on 5210 hardware
			 * before changing. */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue (read-back flushes the posted write) */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {

		/*
		 * Enable DCU early termination to quickly
		 * flush any pending frames from QCU
		 */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Wait for queue to stop (up to 1000 * 100us) */
		for (i = 1000; i > 0 &&
		(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
		i--)
			udelay(100);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
				"queue %i didn't stop !\n", queue);

		/* Check for pending frames (FRMPENDCNT field of the
		 * queue status register) */
		i = 1000;
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		pending){
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
					AR5K_TSF_L32_5211) >> 10,
					AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(400);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 100;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			if (pending)
				ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
					"quiet mechanism didn't work q:%i !\n",
					queue);
		}

		/*
		 * Disable DCU early termination
		 */
		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending) {
			ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
				"tx dma didn't stop (q:%i, frm:%i) !\n",
				queue, pending);
			return -EBUSY;
		}
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}
322
323/**
324 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
325 *
326 * @ah: The &struct ath5k_hw
327 * @queue: The hw queue number
328 *
329 * Get TX descriptor's address for a specific queue. For 5210 we ignore
330 * the queue number and use tx queue type since we only have 2 queues.
331 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
332 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
333 *
334 * XXX: Is TXDP read and clear ?
335 */
336u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
337{
338 u16 tx_reg;
339
Nick Kossifidisc6e387a2008-08-29 22:45:39 +0300340 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
341
342 /*
343 * Get the transmit queue descriptor pointer from the selected queue
344 */
345 /*5210 doesn't have QCU*/
346 if (ah->ah_version == AR5K_AR5210) {
347 switch (ah->ah_txq[queue].tqi_type) {
348 case AR5K_TX_QUEUE_DATA:
349 tx_reg = AR5K_NOQCU_TXDP0;
350 break;
351 case AR5K_TX_QUEUE_BEACON:
352 case AR5K_TX_QUEUE_CAB:
353 tx_reg = AR5K_NOQCU_TXDP1;
354 break;
355 default:
356 return 0xffffffff;
357 }
358 } else {
359 tx_reg = AR5K_QUEUE_TXDP(queue);
360 }
361
362 return ath5k_hw_reg_read(ah, tx_reg);
363}
364
365/**
366 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
367 *
368 * @ah: The &struct ath5k_hw
369 * @queue: The hw queue number
370 *
371 * Set TX descriptor's address for a specific queue. For 5210 we ignore
372 * the queue number and we use tx queue type since we only have 2 queues
373 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
374 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
375 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
376 * active.
377 */
378int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
379{
380 u16 tx_reg;
381
Nick Kossifidisc6e387a2008-08-29 22:45:39 +0300382 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
383
384 /*
385 * Set the transmit queue descriptor pointer register by type
386 * on 5210
387 */
388 if (ah->ah_version == AR5K_AR5210) {
389 switch (ah->ah_txq[queue].tqi_type) {
390 case AR5K_TX_QUEUE_DATA:
391 tx_reg = AR5K_NOQCU_TXDP0;
392 break;
393 case AR5K_TX_QUEUE_BEACON:
394 case AR5K_TX_QUEUE_CAB:
395 tx_reg = AR5K_NOQCU_TXDP1;
396 break;
397 default:
398 return -EINVAL;
399 }
400 } else {
401 /*
402 * Set the transmit queue descriptor pointer for
403 * the selected queue on QCU for 5211+
404 * (this won't work if the queue is still active)
405 */
406 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
407 return -EIO;
408
409 tx_reg = AR5K_QUEUE_TXDP(queue);
410 }
411
412 /* Set descriptor pointer */
413 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
414
415 return 0;
416}
417
/**
 * ath5k_hw_update_tx_triglevel - Update tx trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
 * the buffer and transmits its data. Lowering this results sending small
 * frames more quickly but can lead to tx underruns, raising it a lot can
 * result other problems (i think bmiss is related). Right now we start with
 * the lowest possible (64Bytes) and if we get tx underrun we increase it using
 * the increase flag. Returns -EIO if we have reached maximum/minimum.
 *
 * NOTE(review): only the decrease path can actually return -EIO (at the
 * tuned minimum); the increase path moves halfway towards the maximum
 * and never fails — the kernel-doc above overstates the contract.
 *
 * XXX: Link this with tx DMA size ?
 * XXX: Use it to save interrupts ?
 * TODO: Needs testing, i think it's related to bmiss...
 */
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 * (restored via set_imr at "done" below)
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	/* Current threshold from the TXFULL field of TXCFG */
	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		/* Step down by one; bail out (ret stays -EIO) if that
		 * would drop below the tuned minimum */
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		/* Move halfway towards the tuned maximum */
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}
475
Nick Kossifidis9320b5c2010-11-23 20:36:45 +0200476
Nick Kossifidisc6e387a2008-08-29 22:45:39 +0300477/*******************\
478* Interrupt masking *
479\*******************/
480
481/**
482 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
483 *
484 * @ah: The &struct ath5k_hw
485 *
486 * Check if we have pending interrupts to process. Returns 1 if we
487 * have pending interrupts and 0 if we haven't.
488 */
489bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
490{
Nick Kossifidis509a1062008-09-29 01:23:07 +0300491 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
Nick Kossifidisc6e387a2008-08-29 22:45:39 +0300492}
493
/**
 * ath5k_hw_get_isr - Get interrupt status
 *
 * @ah: The @struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading Primary Interrupt Status Register. Returns an
 * abstract interrupt status mask which is mostly ISR with some uncommon bits
 * being mapped on some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * Per-queue secondary status (TXOK/TXDESC/TXERR/TXEOL/TXURN and queue
 * trigger/overrun/underrun bits) is accumulated into ah->ah_txq_isr as
 * a side effect. Returns 0 normally, -ENODEV if the status register
 * reads back as AR5K_INT_NOCARD (card gone).
 *
 * NOTE: We use read-and-clear register, so after this function is called ISR
 * is zeroed.
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data;

	/*
	 * Read interrupt status from the Interrupt Status register
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		/* All-ones read -> card is gone */
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	} else {
		/*
		 * Read interrupt status from Interrupt
		 * Status Register shadow copy (Read And Clear)
		 *
		 * Note: PISR/SISR Not available on 5210
		 */
		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	}

	/*
	 * Get abstract interrupt mask (driver-compatible),
	 * filtered by the mask the driver actually asked for
	 */
	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

	if (ah->ah_version != AR5K_AR5210) {
		/* Read-and-clear shadow copy of secondary status reg 2 */
		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

		/*HIU = Host Interface Unit (PCI etc)*/
		if (unlikely(data & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*Beacon Not Ready*/
		if (unlikely(data & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* Bus errors reported via SISR2 are fatal as well */
		if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
					AR5K_SISR2_DPERR |
					AR5K_SISR2_MCABT)))
			*interrupt_mask |= AR5K_INT_FATAL;

		if (data & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* BCNMISC multiplexes several beacon-related causes,
		 * demuxed via SISR2 bits */
		if (data & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		if (data & AR5K_ISR_RXDOPPLER)
			*interrupt_mask |= AR5K_INT_RX_DOPPLER;

		/* Per-queue CBR overrun/underrun and trigger bits:
		 * remember which queues fired in ah->ah_txq_isr */
		if (data & AR5K_ISR_QCBRORN) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRORN);
		}
		if (data & AR5K_ISR_QCBRURN) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRURN);
		}
		if (data & AR5K_ISR_QTRIG) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
					AR5K_SISR4_QTRIG);
		}

		/* Per-queue tx completion/error bits from SISR0/1/2 */
		if (data & AR5K_ISR_TXOK)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXOK);

		if (data & AR5K_ISR_TXDESC)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXDESC);

		if (data & AR5K_ISR_TXERR)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXERR);

		if (data & AR5K_ISR_TXEOL)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXEOL);

		if (data & AR5K_ISR_TXURN)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
					AR5K_SISR2_QCU_TXURN);
	} else {
		if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
				| AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
		interrupt_mask &= ~AR5K_INT_BMISS;
		 */
	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}
643
/**
 * ath5k_hw_set_imr - Set interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing
 * Interrupt Mask Register.
 *
 * Returns the previously active abstract mask so callers can restore it
 * (see ath5k_hw_update_tx_triglevel).
 */
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/*Beacon Not Ready*/
		/* NOTE(review): this ORs the abstract AR5K_INT_BNR flag
		 * into the hw mask; every other case here maps to an
		 * AR5K_IMR_* bit, so AR5K_IMR_BNR was probably intended —
		 * verify the bit positions in reg.h before changing. */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		/* Beacon-related causes go through secondary mask 2 */
		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		if (new_mask & AR5K_INT_RX_DOPPLER)
			int_mask |= AR5K_IMR_RXDOPPLER;

		/* Note: Per queue interrupt masks
		 * are set via reset_tx_queue (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}
737
Nick Kossifidis9320b5c2010-11-23 20:36:45 +0200738
739/********************\
740 Init/Stop functions
741\********************/
742
743/**
744 * ath5k_hw_dma_init - Initialize DMA unit
745 *
746 * @ah: The &struct ath5k_hw
747 *
748 * Set DMA size and pre-enable interrupts
749 * (driver handles tx/rx buffer setup and
750 * dma start/stop)
751 *
752 * XXX: Save/restore RXDP/TXDP registers ?
753 */
754void ath5k_hw_dma_init(struct ath5k_hw *ah)
755{
756 /*
757 * Set Rx/Tx DMA Configuration
758 *
759 * Set standard DMA size (128). Note that
760 * a DMA size of 512 causes rx overruns and tx errors
761 * on pci-e cards (tested on 5424 but since rx overruns
762 * also occur on 5416/5418 with madwifi we set 128
763 * for all PCI-E cards to be safe).
764 *
765 * XXX: need to check 5210 for this
766 * TODO: Check out tx triger level, it's always 64 on dumps but I
767 * guess we can tweak it and see how it goes ;-)
768 */
769 if (ah->ah_version != AR5K_AR5210) {
770 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
771 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
772 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
773 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
774 }
775
776 /* Pre-enable interrupts on 5211/5212*/
777 if (ah->ah_version != AR5K_AR5210)
778 ath5k_hw_set_imr(ah, ah->ah_imr);
779
780}
Nick Kossifidisd41174f2010-11-23 20:41:15 +0200781
782/**
783 * ath5k_hw_dma_stop - stop DMA unit
784 *
785 * @ah: The &struct ath5k_hw
786 *
787 * Stop tx/rx DMA and interrupts. Returns
788 * -EBUSY if tx or rx dma failed to stop.
789 *
790 * XXX: Sometimes DMA unit hangs and we have
791 * stuck frames on tx queues, only a reset
792 * can fix that.
793 */
794int ath5k_hw_dma_stop(struct ath5k_hw *ah)
795{
796 int i, qmax, err;
797 err = 0;
798
799 /* Disable interrupts */
800 ath5k_hw_set_imr(ah, 0);
801
802 /* Stop rx dma */
803 err = ath5k_hw_stop_rx_dma(ah);
804 if (err)
805 return err;
806
807 /* Clear any pending interrupts
808 * and disable tx dma */
809 if (ah->ah_version != AR5K_AR5210) {
810 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
811 qmax = AR5K_NUM_TX_QUEUES;
812 } else {
813 /* PISR/SISR Not available on 5210 */
814 ath5k_hw_reg_read(ah, AR5K_ISR);
815 qmax = AR5K_NUM_TX_QUEUES_NOQCU;
816 }
817
818 for (i = 0; i < qmax; i++) {
819 err = ath5k_hw_stop_tx_dma(ah, i);
820 /* -EINVAL -> queue inactive */
821 if (err != -EINVAL)
822 return err;
823 }
824
825 return err;
826}