/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */

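/*
 * Worked example (illustrative only, assuming RX_QUEUE_SIZE == 256):
 * after initialization READ = 0 and WRITE = 255 (READ - 1, wrapped),
 * i.e. the queue holds no data for the driver but is fully stocked for
 * the firmware. If the firmware then fills three buffers, READ advances
 * to 3; the driver may process entries 0..2, restock them, and move
 * WRITE forward by 3 (mod 256). WRITE catching up to READ would mean
 * "full", which is why iwl_rx_queue_space() below keeps slots in reserve.
 */
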
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

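/*
 * Example of the computation above (illustrative, with
 * RX_QUEUE_SIZE == 256): read == 10, write == 8 gives
 * s = 10 - 8 - 2 = 0 free slots, while read == 8, write == 10 gives
 * s = (8 - 10) + 256 - 2 = 252. The two reserved slots keep a
 * completely full queue distinguishable from an empty one.
 */
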
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}

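/*
 * Note on the "& ~0x7" rounding above (illustrative): the device only
 * accepts write pointers that are multiples of 8, so write == 13 is
 * reported as write_actual == 8; the remaining five buffers are only
 * announced to the device once write crosses 16.
 */
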
/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

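/*
 * Example (illustrative): a receive buffer mapped at DMA address
 * 0x1fe4a5000 is stored in the RBD as 0x01fe4a50, i.e. the address
 * shifted right by 8. This is consistent with the checks in
 * iwl_rx_allocate(): the buffer must be 256-byte aligned (nothing is
 * lost in the shift) and fit in 36 bits (the shifted value fits in 28
 * significant bits).
 */
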
/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - there is no need to try to add
	 * buffers... This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the
	 * interrupts / tasklets because we have to (see comment there).
	 * On the other hand, since the APM is stopped, we cannot access the
	 * HW (in particular not prph). So don't try to restock if the APM
	 * has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}
249
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300250/*
251 * iwl_rx_allocate - allocate a page for each used RBD
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700252 *
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300253 * A used RBD is an Rx buffer that has been given to the stack. To use it again
254 * a page must be allocated and the RBD must point to the page. This function
255 * doesn't change the HW pointer but handles the list of pages that is used by
256 * iwl_rx_queue_restock. The latter function will update the HW to use the newly
257 * allocated buffers.
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700258 */
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300259static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700260{
Johannes Berg20d3b642012-05-16 22:54:29 +0200261 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -0700262 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700263 struct list_head *element;
264 struct iwl_rx_mem_buffer *rxb;
265 struct page *page;
266 unsigned long flags;
267 gfp_t gfp_mask = priority;
268
269 while (1) {
270 spin_lock_irqsave(&rxq->lock, flags);
271 if (list_empty(&rxq->rx_used)) {
272 spin_unlock_irqrestore(&rxq->lock, flags);
273 return;
274 }
275 spin_unlock_irqrestore(&rxq->lock, flags);
276
277 if (rxq->free_count > RX_LOW_WATERMARK)
278 gfp_mask |= __GFP_NOWARN;
279
Johannes Bergb2cf4102012-04-09 17:46:51 -0700280 if (trans_pcie->rx_page_order > 0)
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700281 gfp_mask |= __GFP_COMP;
282
283 /* Alloc a new receive buffer */
Johannes Berg20d3b642012-05-16 22:54:29 +0200284 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700285 if (!page) {
286 if (net_ratelimit())
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -0700287 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
Emmanuel Grumbachd6189122011-08-25 23:10:39 -0700288 "order: %d\n",
Johannes Bergb2cf4102012-04-09 17:46:51 -0700289 trans_pcie->rx_page_order);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700290
291 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
292 net_ratelimit())
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -0700293 IWL_CRIT(trans, "Failed to alloc_pages with %s."
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700294 "Only %u free buffers remaining.\n",
295 priority == GFP_ATOMIC ?
296 "GFP_ATOMIC" : "GFP_KERNEL",
297 rxq->free_count);
298 /* We don't reschedule replenish work here -- we will
299 * call the restock method and if it still needs
300 * more buffers it will schedule replenish */
301 return;
302 }
303
304 spin_lock_irqsave(&rxq->lock, flags);
305
306 if (list_empty(&rxq->rx_used)) {
307 spin_unlock_irqrestore(&rxq->lock, flags);
Johannes Bergb2cf4102012-04-09 17:46:51 -0700308 __free_pages(page, trans_pcie->rx_page_order);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700309 return;
310 }
311 element = rxq->rx_used.next;
312 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
313 list_del(element);
314
315 spin_unlock_irqrestore(&rxq->lock, flags);
316
317 BUG_ON(rxb->page);
318 rxb->page = page;
319 /* Get physical address of the RB */
Johannes Berg20d3b642012-05-16 22:54:29 +0200320 rxb->page_dma =
321 dma_map_page(trans->dev, page, 0,
322 PAGE_SIZE << trans_pcie->rx_page_order,
323 DMA_FROM_DEVICE);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700324 /* dma address must be no more than 36 bits */
325 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
326 /* and also 256 byte aligned! */
327 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
328
329 spin_lock_irqsave(&rxq->lock, flags);
330
331 list_add_tail(&rxb->list, &rxq->rx_free);
332 rxq->free_count++;
333
334 spin_unlock_irqrestore(&rxq->lock, flags);
335 }
336}
337
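/*
 * Notes on the GFP handling above: while the pool is healthy
 * (free_count above RX_LOW_WATERMARK) a failed allocation is expected
 * to be harmless, so __GFP_NOWARN suppresses the warning; __GFP_COMP
 * is required only for multi-page buffers (rx_page_order > 0), which
 * must be allocated as compound pages.
 */
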
/*
 * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
void iwl_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_rx_allocate(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_rx_queue_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

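/*
 * iwl_rx_replenish() may sleep (GFP_KERNEL) and is therefore only
 * called from process context (the rx_replenish work item, and during
 * initialization); iwl_rx_replenish_now() below is the GFP_ATOMIC
 * counterpart used from the Rx tasklet, where sleeping is not allowed.
 */
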
static void iwl_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_rx_allocate(trans, GFP_ATOMIC);

	iwl_rx_queue_restock(trans);
}

void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_rx_replenish(trans_pcie->trans);
}

static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_tx_queue_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
			/* nor is the duplicated part of the command */
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_tx_cmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
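
/*
 * A single RB (page) may hold several packets back to back; the loop
 * above walks them via 'offset', aligned to FH_RSCSR_FRAME_ALIGN, and
 * stops at the first frame marked FH_RSCSR_FRAME_INVALID or when no
 * further iwl_cmd_header could fit in the page.
 */
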
501
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700502/**
503 * iwl_rx_handle - Main entry function for receiving responses from uCode
504 *
505 * Uses the priv->rx_handlers callback function array to invoke
506 * the appropriate handlers, including command responses,
507 * frame-received notifications, and other notifications.
508 */
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -0700509static void iwl_rx_handle(struct iwl_trans *trans)
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700510{
Johannes Bergdf2f3212012-03-05 11:24:40 -0800511 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -0700512 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700513 u32 r, i;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700514 u8 fill_rx = 0;
515 u32 count = 8;
516 int total_empty;
517
518 /* uCode's read index (stored in shared DRAM) indicates the last Rx
519 * buffer that the driver may process (last buffer filled by ucode). */
520 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
521 i = rxq->read;
522
523 /* Rx interrupt, but nothing sent from uCode */
524 if (i == r)
Emmanuel Grumbach726f23f2012-05-16 22:40:49 +0200525 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700526
527 /* calculate total frames need to be restock after handling RX */
528 total_empty = r - rxq->write_actual;
529 if (total_empty < 0)
530 total_empty += RX_QUEUE_SIZE;
531
532 if (total_empty > (RX_QUEUE_SIZE / 2))
533 fill_rx = 1;
534
535 while (i != r) {
Johannes Berg48a2d662012-03-05 11:24:39 -0800536 struct iwl_rx_mem_buffer *rxb;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700537
538 rxb = rxq->queue[i];
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700539 rxq->queue[i] = NULL;
540
Emmanuel Grumbach726f23f2012-05-16 22:40:49 +0200541 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
542 r, i, rxb);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800543 iwl_rx_handle_rxbuf(trans, rxb);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700544
545 i = (i + 1) & RX_QUEUE_MASK;
546 /* If there are a lot of unused frames,
547 * restock the Rx queue so ucode wont assert. */
548 if (fill_rx) {
549 count++;
550 if (count >= 8) {
551 rxq->read = i;
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300552 iwl_rx_replenish_now(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700553 count = 0;
554 }
555 }
556 }
557
558 /* Backtrack one entry */
559 rxq->read = i;
560 if (fill_rx)
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300561 iwl_rx_replenish_now(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700562 else
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300563 iwl_rx_queue_restock(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700564}
565
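/*
 * Restocking cadence (as implemented above): when more than half of
 * RX_QUEUE_SIZE is empty, the queue is replenished after every eight
 * handled buffers instead of once at the end, so that the firmware is
 * not starved of buffers during a long burst of traffic.
 */
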
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	iwl_op_mode_nic_error(trans->op_mode);
}

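/*
 * Summary of the error path above: in the WiFi/WiMAX coex case the
 * op_mode is only told that WiMAX owns the RF; otherwise the CSR and
 * FH registers are dumped and the op_mode is asked to restart the NIC.
 * In both cases any waiter on a synchronous host command is woken
 * first, so it does not block on a command that will never complete.
 */
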
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to go undetected. We work around these
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* the interrupt was saved in the inta variable;
	 * now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished transmitting "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Delivering an RX interrupt requires several steps to be
		 * done in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This can lead to an RX race: the driver may receive the RX
		 * interrupt before the shared data changes are visible;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic int), to
		 * catch any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
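
/*
 * With a 4096-byte table (ICT_SHIFT == 12) and 4-byte (u32) entries,
 * ICT_COUNT evaluates to 1024 slots before the index wraps.
 */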

/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM shared table; it is an aligned memory block of
 * ICT_SIZE. Also reset all data related to the ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_free_isr_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* The device is going up: tell it to use the ICT interrupt table,
 * and make the driver start using ICT interrupts.
 */
void iwl_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* The device is going down: disable ICT interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

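/*
 * The 0xFFFFFFFF / 0xa5a5a5a0 test in iwl_isr() above guards against a
 * device that has dropped off the bus: reads from a vanished PCI(e)
 * device typically return all ones, so the interrupt is claimed but no
 * tasklet is scheduled.
 */
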
/* Interrupt handler using the ICT table. With this handler the driver
 * stops reading the INTA register directly, which is expensive: instead,
 * the device writes interrupt causes into the ICT DRAM table and
 * increments its index, then fires an interrupt at the driver. The
 * driver ORs all ICT table entries from the current index up to the
 * first entry holding 0; the result is the set of interrupts that needs
 * to be serviced. The driver then sets the consumed entries back to 0
 * and updates the index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

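	/*
	 * Illustrative unpacking of the expression above: the low byte of
	 * 'val' carries CSR_INT bits 0-7 and the high byte carries bits
	 * 24-31, so val == 0x8080 expands to inta == 0x80000080. Bit 15 of
	 * 'val' becomes bit 31 of 'inta' (the Rx bit), which is what the
	 * 0x8000 workaround above relies on.
	 */
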
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001064 inta &= trans_pcie->inta_mask;
1065 trans_pcie->inta |= inta;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001066
1067 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1068 if (likely(inta))
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001069 tasklet_schedule(&trans_pcie->irq_tasklet);
Don Fry83626402012-03-07 09:52:37 -08001070 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
Johannes Bergb80667e2011-12-09 07:26:13 -08001071 !trans_pcie->inta) {
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001072 /* Allow interrupt if was disabled by this handler and
1073 * no tasklet was schedules, We should not enable interrupt,
1074 * tasklet will enable it.
1075 */
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001076 iwl_enable_interrupts(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001077 }
1078
Johannes Berg7b114882012-02-05 13:55:11 -08001079 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001080 return IRQ_HANDLED;
1081
1082 none:
1083 /* re-enable interrupts here since we don't have anything to service.
1084 * only Re-enable if disabled by irq.
1085 */
Don Fry83626402012-03-07 09:52:37 -08001086 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
Johannes Bergb80667e2011-12-09 07:26:13 -08001087 !trans_pcie->inta)
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001088 iwl_enable_interrupts(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001089
Johannes Berg7b114882012-02-05 13:55:11 -08001090 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001091 return IRQ_NONE;
1092}