/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/*
 * dma.c - DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest is handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that caused the interrupt.
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma - Start DMA receive
 *
 * @ah: The &struct ath5k_hw
 */
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
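	/* Dummy read to flush the posted write (same pattern as the
	 * PISR flush further down in ath5k_hw_get_isr) */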
	ath5k_hw_reg_read(ah, AR5K_CR);
}

/**
 * ath5k_hw_stop_rx_dma - Stop DMA receive
 *
 * @ah: The &struct ath5k_hw
 */
static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
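	 * (we poll for up to ~100ms: 1000 iterations of 100us each)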
	 */
	for (i = 1000; i > 0 &&
		(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
		i--)
		udelay(100);

	if (!i)
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"failed to stop RX DMA !\n");

	return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp - Get RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 */
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp - Set RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Returns -EIO if rx is active
 */
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tried to set RXDP while rx was active !\n");
		return -EIO;
	}

	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
	return 0;
}


/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and since 5210 doesn't have
 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EINVAL if the queue number is out
 * of range or the queue is inactive, and -EIO if the queue is disabled.
 *
 * NOTE: Must be called after setting up the tx control descriptor for that
 * queue (see below).
 */
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
					AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}

/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain the queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if queue number is out of range or inactive.
 *
 */
static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i = 40;
	u32 tx_queue, pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* XXX Fix me... */
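			/* (note: as written this mask is always zero, so
			 * only the BSR clear below has any effect) */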
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {

		/*
		 * Enable DCU early termination to quickly
		 * flush any pending frames from QCU
		 */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Wait for queue to stop */
		for (i = 1000; i > 0 &&
		(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
		i--)
			udelay(100);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"queue %i didn't stop !\n", queue);

		/* Check for pending frames */
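		/* (AR5K_QCU_STS_FRMPENDCNT holds the number of frames still
		 * pending on this queue; we poll it for up to ~100ms) */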
		i = 1000;
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+, order the PCU to drop packets using
		 * the QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		    pending) {
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
					AR5K_TSF_L32_5211) >> 10,
					AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(400);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 100;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			if (pending)
				ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
					"quiet mechanism didn't work q:%i !\n",
					queue);
		}

		/*
		 * Disable DCU early termination
		 */
		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending) {
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tx dma didn't stop (q:%i, frm:%i) !\n",
				queue, pending);
			return -EBUSY;
		}
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}

/**
 * ath5k_hw_stop_beacon_queue - Stop beacon queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The queue number
 *
 * Returns -EIO if queue didn't stop
 */
int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
	int ret;
	ret = ath5k_hw_stop_tx_dma(ah, queue);
	if (ret) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
			"beacon queue didn't stop !\n");
		return -EIO;
	}
	return 0;
}

/**
 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and use tx queue type since we only have 2 queues.
 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear?
 */
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Get the transmit queue descriptor pointer from the selected queue
	 */
	/* 5210 doesn't have QCU */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return 0xffffffff;
		}
	} else {
		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address of the TX descriptor
 *
 * Set TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and we use tx queue type since we only have 2 queues
 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
 * active.
 */
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return -EINVAL;
		}
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}

/**
 * ath5k_hw_update_tx_triglevel - Update tx trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
 * the buffer and transmits its data. Lowering this results in sending small
 * frames more quickly but can lead to tx underruns; raising it a lot can
 * result in other problems (I think bmiss is related). Right now we start
 * with the lowest possible (64 bytes) and if we get a tx underrun we
 * increase it using the increase flag. Returns -EIO if we have reached
 * maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX: Use it to save interrupts ?
 */
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

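	/*
	 * Step down one slot at a time on decrease, but jump halfway
	 * toward the max on increase (level += (MAX - level) / 2) so we
	 * recover from tx underruns quickly.
	 */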
	if (!increase) {
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}


/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
 *
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns 1 if we
 * have pending interrupts and 0 if we don't.
 */
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
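	/* Treat anything other than an explicit 1 as "no interrupt";
	 * presumably this guards against garbage reads (e.g. all-ones)
	 * from a card that has gone away */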
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr - Get interrupt status
 *
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading the Primary Interrupt Status Register. Returns
 * an abstract interrupt status mask which is mostly ISR with some uncommon
 * bits being mapped to some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
 * function gets called are cleared on return.
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * Read interrupt status from the Primary Interrupt
	 * Status Register.
	 *
	 * Note: PISR/SISR not available on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Handle INT_FATAL */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
						| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
			interrupt_mask &= ~AR5K_INT_BMISS;
		 */

		data = isr;
	} else {
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR and SISRs... */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * PISR holds the logical OR of interrupt bits
		 * from the SISR registers:
		 *
		 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
		 *			per-queue bits on SISR0
		 *
		 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
		 *			per-queue bits on SISR1
		 *
		 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
		 *
		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
		 *
		 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
		 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
		 *		(and TSFOOR ?) bits on SISR2
		 *
		 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
		 *			QCBRURN per-queue bits on SISR3
		 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
		 *
		 * If we clear these bits on PISR we'll also clear all
		 * related bits from the SISRs, e.g. if we write the TXOK bit
		 * on PISR we'll clear all TXOK bits from SISR0, so if a new
		 * TXOK interrupt got fired for another queue while we were
		 * reading the interrupt registers and we write back the TXOK
		 * bit on PISR we'll lose it. So make sure that we don't write
		 * back on PISR any bits that come from the SISRs. Clearing
		 * them from the SISRs will also clear PISR so no need to
		 * worry here.
		 */

		pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;

		/*
		 * Write to clear them...
		 * Note: This means that each bit we write back
		 * to the registers will get cleared, leaving the
		 * rest unaffected. So this won't affect new interrupts
		 * we didn't catch while reading/processing, we'll get
		 * them next time get_isr gets called.
		 */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
		/* Flush previous write */
		ath5k_hw_reg_read(ah, AR5K_PISR);

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;


		/* We treat TXOK, TXDESC, TXERR and TXEOL
		 * the same way (schedule the tx tasklet)
		 * so we track them all together per queue */
		if (pisr & AR5K_ISR_TXOK)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXOK);

		if (pisr & AR5K_ISR_TXDESC)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXDESC);

		if (pisr & AR5K_ISR_TXERR)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXERR);

		if (pisr & AR5K_ISR_TXEOL)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXEOL);

		/* Currently this is not very useful since we treat
		 * all queues the same way if we get a TXURN (update
		 * tx trigger level) but we might need it later on */
		if (pisr & AR5K_ISR_TXURN)
			ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
						AR5K_SISR2_QCU_TXURN);

		/* Misc Beacon related interrupts */

		/* For AR5211 */
		if (pisr & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* For AR5212+ */
		if (pisr & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		/* The interrupts below are unlikely to happen */

		/* HIU = Host Interface Unit (PCI etc)
		 * Can be one of MCABT, SSERR, DPERR from SISR2 */
		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;


		/* Beacon Not Ready */
		if (unlikely(pisr & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* Doppler chirp received */
		if (unlikely(pisr & (AR5K_ISR_RXDOPPLER)))
			*interrupt_mask |= AR5K_INT_RX_DOPPLER;

		/* A queue got CBR overrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRORN);
		}

		/* A queue got CBR underrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRURN);
		}

		/* A queue got triggered */
		if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
						AR5K_SISR4_QTRIG);
		}

		data = pisr;
	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}

/**
 * ath5k_hw_set_imr - Set interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing them
 * to the Interrupt Mask Register.
 */
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		/* Fatal interrupt abstraction for 5211+ */
		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* Misc beacon related interrupts */
		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		/* Beacon Not Ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		/* RX doppler chirp */
		if (new_mask & AR5K_INT_RX_DOPPLER)
			int_mask |= AR5K_IMR_RXDOPPLER;

		/* Note: Per queue interrupt masks
		 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* Fatal interrupt abstraction for 5210 */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		/* Only common interrupts left for 5210 (no SIMRs) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If the RXNOFRM interrupt is masked, disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}


/********************\
  Init/Stop functions
\********************/

/**
 * ath5k_hw_dma_init - Initialize DMA unit
 *
 * @ah: The &struct ath5k_hw
 *
 * Set DMA size and pre-enable interrupts
 * (driver handles tx/rx buffer setup and
 * dma start/stop)
 *
 * XXX: Save/restore RXDP/TXDP registers ?
 */
void ath5k_hw_dma_init(struct ath5k_hw *ah)
{
	/*
	 * Set Rx/Tx DMA Configuration
	 *
	 * Set standard DMA size (128). Note that
	 * a DMA size of 512 causes rx overruns and tx errors
	 * on pci-e cards (tested on 5424 but since rx overruns
	 * also occur on 5416/5418 with madwifi we set 128
	 * for all PCI-E cards to be safe).
	 *
	 * XXX: need to check 5210 for this
	 * TODO: Check out tx trigger level, it's always 64 on dumps but I
	 * guess we can tweak it and see how it goes ;-)
	 */
	if (ah->ah_version != AR5K_AR5210) {
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
			AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
			AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
	}

	/* Pre-enable interrupts on 5211/5212 */
	if (ah->ah_version != AR5K_AR5210)
		ath5k_hw_set_imr(ah, ah->ah_imr);

}

/**
 * ath5k_hw_dma_stop - stop DMA unit
 *
 * @ah: The &struct ath5k_hw
 *
 * Stop tx/rx DMA and interrupts. Returns
 * -EBUSY if tx or rx dma failed to stop.
 *
 * XXX: Sometimes the DMA unit hangs and we have
 * stuck frames on tx queues, only a reset
 * can fix that.
 */
int ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
	int i, qmax, err;
	err = 0;

	/* Disable interrupts */
	ath5k_hw_set_imr(ah, 0);

	/* Stop rx dma */
	err = ath5k_hw_stop_rx_dma(ah);
	if (err)
		return err;

	/* Clear any pending interrupts
	 * and disable tx dma */
	if (ah->ah_version != AR5K_AR5210) {
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
		qmax = AR5K_NUM_TX_QUEUES;
	} else {
		/* PISR/SISR not available on 5210 */
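		/* (the ISR read below appears to be read-to-clear on 5210,
		 * which is also why get_isr() above never writes it back,
		 * so this read acks anything still pending) */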
		ath5k_hw_reg_read(ah, AR5K_ISR);
		qmax = AR5K_NUM_TX_QUEUES_NOQCU;
	}

	for (i = 0; i < qmax; i++) {
		err = ath5k_hw_stop_tx_dma(ah, i);
		/* -EINVAL -> queue inactive */
		if (err && err != -EINVAL)
			return err;
	}

	return 0;
}