/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is
 *   posted with 2 empty RBDs; there is no guarantee when the other 6 RBDs
 *   are supplied).
 *   The queues supply the recycled RBDs for the rest.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

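/*
 * Illustrative sketch (not part of the driver): the empty/full conditions
 * described above, written out for a queue of RX_QUEUE_SIZE entries. The
 * helper names are hypothetical and exist only to make the invariants
 * explicit; the real code expresses the same arithmetic inline, e.g. in
 * iwl_rxq_space() below.
 */
#if 0	/* example only, never compiled */
static bool example_rxq_is_empty(u32 read, u32 write)
{
	/* WRITE = READ - 1 (mod queue size) <=> no good data to read */
	return write == ((read - 1) & RX_QUEUE_MASK);
}

static bool example_rxq_is_full(u32 read, u32 write)
{
	/* WRITE = READ <=> every slot is owned by the firmware */
	return write == read;
}
#endif
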
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
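
/*
 * Worked example (illustrative, assuming RX_QUEUE_SIZE = 256): with
 * rxq->read = 10 and rxq->write = 200, (10 - 200 - 1) & 255 = 65, so 65
 * slots are free. The bitwise AND behaves like modulo 256 even though the
 * intermediate subtraction is negative.
 */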

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
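
/*
 * Example (illustrative): RB DMA addresses are at most 36 bits wide and
 * 256-byte aligned (both invariants are BUG_ON-checked where RBs are
 * mapped below), so the low 8 bits are always zero and the remaining 28
 * bits fit in a u32. E.g. the address 0xF12345600 is stored as 0x0F123456.
 */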

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - there is no need to try to add
	 * buffers... This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the interrupts
	 * because we have to (see comment there). On the other hand, since
	 * the APM is stopped, we cannot access the HW (in particular not
	 * prph). So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
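
/*
 * Worked example (illustrative): the device is only told about the write
 * pointer in multiples of 8. With rxq->write = 13, round_down(13, 8) = 8,
 * so write_actual stays at 8 and the remaining 5 restocked slots are
 * announced only once write crosses 16.
 */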

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (rxq->free_count > RX_LOW_WATERMARK)
		gfp_mask |= __GFP_NOWARN;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/* Issue an error if the hardware has consumed more than half
		 * of its free buffer list and we don't have enough
		 * pre-allocated buffers.
		 */
		if (rxq->free_count <= RX_LOW_WATERMARK &&
		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
		    net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
				 rxq->free_count);
		return NULL;
	}
	return page;
}
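
/*
 * Usage note (illustrative): callers pick the gfp priority by context -
 * GFP_KERNEL from process context, as in the replenish path and the
 * background allocator, and GFP_ATOMIC from the Rx interrupt path when the
 * queue is in emergency (see iwl_pcie_rx_handle() below).
 */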

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
407
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300408/*
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +0200409 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300410 *
411 * When moving to rx_free an page is allocated for the slot.
412 *
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +0200413 * Also restock the Rx queue via iwl_pcie_rxq_restock.
Sara Sharon26d535a2015-04-28 12:56:54 +0300414 * This is called only during initialization
Emmanuel Grumbach358a46d2012-09-09 16:39:18 +0300415 */
Sara Sharon26d535a2015-04-28 12:56:54 +0300416static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700417{
Sara Sharon26d535a2015-04-28 12:56:54 +0300418 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700419
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +0200420 iwl_pcie_rxq_restock(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -0700421}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and the initial pool covers
			 * any possible gap between the time the page is
			 * allocated and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
					  struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}
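
/*
 * Worked example (illustrative): if two allocation requests have completed,
 * req_ready == 2; atomic_dec_if_positive() decrements it and returns 1, so
 * the caller may claim RX_CLAIM_REQ_ALLOC buffers. With req_ready == 0 it
 * returns -1 without decrementing, and the caller backs off with -ENOMEM
 * and retries on a later pass.
 */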

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	spin_lock_init(&rba->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *   the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k, 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
	int i;

	lockdep_assert_held(&rba->lock);

	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);

	for (i = 0; i < RX_POOL_SIZE; i++)
		list_add(&rba->pool[i].list, &rba->rbd_empty);
}

static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rba->lock);

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!rba->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
		rba->pool[i].page = NULL;
	}
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rx_free_rba(trans);
	iwl_pcie_rx_init_rba(rba);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	spin_lock(&rba->lock);
	iwl_pcie_rx_free_rba(trans);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for allocation. Taking the count modulo
	 * RX_CLAIM_REQ_ALLOC covers the case where we failed to claim
	 * RX_CLAIM_REQ_ALLOC buffers earlier but still need to post
	 * another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator's ownership.
		 * The allocator has another 6 from its pool for the request
		 * completion */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
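
/*
 * Worked example (illustrative): with RX_POST_REQ_ALLOC = 2 and
 * RX_CLAIM_REQ_ALLOC = 8, the queue posts a request when used_count hits 2,
 * hands over 6 more used RBDs around count 8 while trying to claim the
 * finished request, and because of the modulo also posts again at 10, 18,
 * ... if an earlier claim has not succeeded yet.
 */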
842
Emmanuel Grumbach9805c4462012-11-14 14:44:18 +0200843static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
Sara Sharon26d535a2015-04-28 12:56:54 +0300844 struct iwl_rx_mem_buffer *rxb,
845 bool emergency)
Johannes Bergdf2f3212012-03-05 11:24:40 -0800846{
847 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +0200848 struct iwl_rxq *rxq = &trans_pcie->rxq;
849 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
Johannes Berg0c197442012-03-15 13:26:43 -0700850 bool page_stolen = false;
Johannes Bergb2cf4102012-04-09 17:46:51 -0700851 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
Johannes Berg0c197442012-03-15 13:26:43 -0700852 u32 offset = 0;
Johannes Bergdf2f3212012-03-05 11:24:40 -0800853
854 if (WARN_ON(!rxb))
855 return;
856
Johannes Berg0c197442012-03-15 13:26:43 -0700857 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800858
Johannes Berg0c197442012-03-15 13:26:43 -0700859 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
860 struct iwl_rx_packet *pkt;
Johannes Berg0c197442012-03-15 13:26:43 -0700861 u16 sequence;
862 bool reclaim;
Johannes Bergf7e64692015-06-23 21:58:17 +0200863 int index, cmd_index, len;
Johannes Berg0c197442012-03-15 13:26:43 -0700864 struct iwl_rx_cmd_buffer rxcb = {
865 ._offset = offset,
Emmanuel Grumbachd13f1862013-01-23 10:59:29 +0200866 ._rx_page_order = trans_pcie->rx_page_order,
Johannes Berg0c197442012-03-15 13:26:43 -0700867 ._page = rxb->page,
868 ._page_stolen = false,
David S. Miller0d6c4a22012-05-07 23:35:40 -0400869 .truesize = max_len,
Johannes Berg0c197442012-03-15 13:26:43 -0700870 };
Johannes Bergdf2f3212012-03-05 11:24:40 -0800871
Johannes Berg0c197442012-03-15 13:26:43 -0700872 pkt = rxb_addr(&rxcb);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800873
Johannes Berg0c197442012-03-15 13:26:43 -0700874 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
875 break;
Johannes Bergdf2f3212012-03-05 11:24:40 -0800876
Liad Kaufman9243efc2015-03-15 17:38:22 +0200877 IWL_DEBUG_RX(trans,
878 "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
879 rxcb._offset,
880 get_cmd_string(trans_pcie, pkt->hdr.cmd),
881 pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
Johannes Bergdf2f3212012-03-05 11:24:40 -0800882
Johannes Berg65b30342014-01-08 13:16:33 +0100883 len = iwl_rx_packet_len(pkt);
Johannes Berg0c197442012-03-15 13:26:43 -0700884 len += sizeof(u32); /* account for status word */
Johannes Bergf042c2e2012-09-05 22:34:44 +0200885 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
886 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
Johannes Bergd663ee72012-03-10 13:00:07 -0800887
Johannes Berg0c197442012-03-15 13:26:43 -0700888 /* Reclaim a command buffer only if this packet is a response
889 * to a (driver-originated) command.
890 * If the packet (e.g. Rx frame) originated from uCode,
891 * there is no command buffer to reclaim.
892 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
893 * but apparently a few don't get set; catch them here. */
894 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
895 if (reclaim) {
896 int i;
897
898 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
899 if (trans_pcie->no_reclaim_cmds[i] ==
900 pkt->hdr.cmd) {
901 reclaim = false;
902 break;
903 }
Johannes Bergd663ee72012-03-10 13:00:07 -0800904 }
905 }
Johannes Bergdf2f3212012-03-05 11:24:40 -0800906
Johannes Berg0c197442012-03-15 13:26:43 -0700907 sequence = le16_to_cpu(pkt->hdr.sequence);
908 index = SEQ_TO_INDEX(sequence);
909 cmd_index = get_cmd_index(&txq->q, index);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800910
Johannes Berg1be5d8c2015-06-11 16:51:24 +0200911 iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
Johannes Berg0c197442012-03-15 13:26:43 -0700912
Emmanuel Grumbach96791422012-07-24 01:58:32 +0300913 if (reclaim) {
Johannes Berg5d4185a2014-09-09 21:16:06 +0200914 kzfree(txq->entries[cmd_index].free_buf);
Johannes Bergf4feb8a2012-10-19 14:24:43 +0200915 txq->entries[cmd_index].free_buf = NULL;
Emmanuel Grumbach96791422012-07-24 01:58:32 +0300916 }
917
Johannes Berg0c197442012-03-15 13:26:43 -0700918 /*
919 * After here, we should always check rxcb._page_stolen,
920 * if it is true then one of the handlers took the page.
921 */
922
923 if (reclaim) {
924 /* Invoke any callbacks, transfer the buffer to caller,
925 * and fire off the (possibly) blocking
926 * iwl_trans_send_cmd()
927 * as we reclaim the driver command queue */
928 if (!rxcb._page_stolen)
Johannes Bergf7e64692015-06-23 21:58:17 +0200929 iwl_pcie_hcmd_complete(trans, &rxcb);
Johannes Berg0c197442012-03-15 13:26:43 -0700930 else
931 IWL_WARN(trans, "Claim null rxb?\n");
932 }
933
934 page_stolen |= rxcb._page_stolen;
935 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800936 }
937
Johannes Berg0c197442012-03-15 13:26:43 -0700938 /* page was stolen from us -- free our reference */
939 if (page_stolen) {
Johannes Bergb2cf4102012-04-09 17:46:51 -0700940 __free_pages(rxb->page, trans_pcie->rx_page_order);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800941 rxb->page = NULL;
Johannes Berg0c197442012-03-15 13:26:43 -0700942 }
Johannes Bergdf2f3212012-03-05 11:24:40 -0800943
944 /* Reuse the page if possible. For notification packets and
945 * SKBs that fail to Rx correctly, add them back into the
946 * rx_free list for reuse later. */
Johannes Bergdf2f3212012-03-05 11:24:40 -0800947 if (rxb->page != NULL) {
948 rxb->page_dma =
949 dma_map_page(trans->dev, rxb->page, 0,
Johannes Berg20d3b642012-05-16 22:54:29 +0200950 PAGE_SIZE << trans_pcie->rx_page_order,
951 DMA_FROM_DEVICE);
Johannes Berg7c3415822012-11-04 09:29:17 +0100952 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
953 /*
954 * free the page(s) as well to not break
955 * the invariant that the items on the used
956 * list have no page(s)
957 */
958 __free_pages(rxb->page, trans_pcie->rx_page_order);
959 rxb->page = NULL;
Sara Sharon26d535a2015-04-28 12:56:54 +0300960 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
Johannes Berg7c3415822012-11-04 09:29:17 +0100961 } else {
962 list_add_tail(&rxb->list, &rxq->rx_free);
963 rxq->free_count++;
964 }
Johannes Bergdf2f3212012-03-05 11:24:40 -0800965 } else
Sara Sharon26d535a2015-04-28 12:56:54 +0300966 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
Johannes Bergdf2f3212012-03-05 11:24:40 -0800967}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
			emergency = true;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxb, emergency);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				 * for allocator use
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			 * No need to reschedule work - allocator exits only on
			 * success */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to free list */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < RX_QUEUE_SIZE / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
				spin_lock(&rxq->lock);
			}
		}
		/* handle restock for three cases, can be all of them at once:
		 * - we just pulled buffers from the allocator
		 * - we have 8+ unstolen pages accumulated
		 * - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			iwl_pcie_rxq_restock(trans);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

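/*
 * Sizing note (illustrative): with ICT_SHIFT = 12, one device page holds
 * the whole table: ICT_SIZE = 4096 bytes and ICT_COUNT = 4096 / sizeof(u32)
 * = 1024 entries.
 */
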
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
1199
Johannes Berg2bfb5092012-12-27 21:43:48 +01001200irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001201{
Johannes Berg2bfb5092012-12-27 21:43:48 +01001202 struct iwl_trans *trans = dev_id;
Johannes Berg20d3b642012-05-16 22:54:29 +02001203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1204 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001205 u32 inta = 0;
1206 u32 handled = 0;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001207
Johannes Berg2bfb5092012-12-27 21:43:48 +01001208 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1209
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001210 spin_lock(&trans_pcie->irq_lock);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001211
Emmanuel Grumbach0fec9542013-12-11 09:02:25 +02001212	/* If the DRAM interrupt table is not set up yet,
1213	 * fall back to the legacy INTA register.
1214	 */
1215 if (likely(trans_pcie->use_ict))
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001216 inta = iwl_pcie_int_cause_ict(trans);
Emmanuel Grumbach0fec9542013-12-11 09:02:25 +02001217 else
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001218 inta = iwl_pcie_int_cause_non_ict(trans);
Emmanuel Grumbach0fec9542013-12-11 09:02:25 +02001219
Emmanuel Grumbach7ba1faa2013-12-11 09:39:30 +02001220 if (iwl_have_debug_level(IWL_DL_ISR)) {
1221 IWL_DEBUG_ISR(trans,
1222 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1223 inta, trans_pcie->inta_mask,
1224 iwl_read32(trans, CSR_INT_MASK),
1225 iwl_read32(trans, CSR_FH_INT_STATUS));
1226 if (inta & (~trans_pcie->inta_mask))
1227 IWL_DEBUG_ISR(trans,
1228 "We got a masked interrupt (0x%08x)\n",
1229 inta & (~trans_pcie->inta_mask));
1230 }
1231
1232 inta &= trans_pcie->inta_mask;
1233
1234 /*
1235	 * Ignore the interrupt if there's nothing in the NIC to service.
1236	 * This may be due to an IRQ shared with another device,
1237	 * or to sporadic interrupts thrown from our NIC.
1238 */
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001239 if (unlikely(!inta)) {
Emmanuel Grumbach7ba1faa2013-12-11 09:39:30 +02001240 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1241 /*
1242 * Re-enable interrupts here since we don't
1243 * have anything to service
1244 */
1245 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1246 iwl_enable_interrupts(trans);
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001247 spin_unlock(&trans_pcie->irq_lock);
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001248 lock_map_release(&trans->sync_cmd_lockdep_map);
1249 return IRQ_NONE;
1250 }
1251
Emmanuel Grumbach7ba1faa2013-12-11 09:39:30 +02001252 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1253 /*
1254 * Hardware disappeared. It might have
1255 * already raised an interrupt.
1256 */
1257 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001258 spin_unlock(&trans_pcie->irq_lock);
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001259 goto out;
Emmanuel Grumbacha0f337c2013-12-11 09:00:03 +02001260 }
1261
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001262 /* Ack/clear/reset pending uCode interrupts.
1263	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
1264 */
1265	/* There is a hardware bug in the interrupt mask function such that some
1266	 * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1267	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1268	 * ICT interrupt handling mechanism has another bug that might cause
1269	 * these unmasked interrupts to fail to be detected. We work around the
1270	 * hardware bugs here by ACKing all the possible interrupts so that
1271	 * interrupt coalescing can still be achieved.
1272 */
Emmanuel Grumbach7117c002013-12-11 09:20:34 +02001273 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001274
Johannes Berg51cd53a2013-06-12 09:56:51 +02001275 if (iwl_have_debug_level(IWL_DL_ISR))
Johannes Berg0ca24da2012-03-15 13:26:46 -07001276 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
Johannes Berg51cd53a2013-06-12 09:56:51 +02001277 inta, iwl_read32(trans, CSR_INT_MASK));
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001278
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001279 spin_unlock(&trans_pcie->irq_lock);
Johannes Bergb49ba042012-01-19 08:20:57 -08001280
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001281 /* Now service all interrupt bits discovered above. */
1282 if (inta & CSR_INT_BIT_HW_ERR) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001283 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001284
1285 /* Tell the device to stop sending interrupts */
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001286 iwl_disable_interrupts(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001287
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001288 isr_stats->hw++;
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001289 iwl_pcie_irq_handle_error(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001290
1291 handled |= CSR_INT_BIT_HW_ERR;
1292
Johannes Berg2bfb5092012-12-27 21:43:48 +01001293 goto out;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001294 }
1295
Johannes Berga8bceb32012-03-05 11:24:30 -08001296 if (iwl_have_debug_level(IWL_DL_ISR)) {
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001297 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1298 if (inta & CSR_INT_BIT_SCD) {
Johannes Berg51cd53a2013-06-12 09:56:51 +02001299 IWL_DEBUG_ISR(trans,
1300 "Scheduler finished to transmit the frame/frames.\n");
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001301 isr_stats->sch++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001302 }
1303
1304 /* Alive notification via Rx interrupt will do the real work */
1305 if (inta & CSR_INT_BIT_ALIVE) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001306 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001307 isr_stats->alive++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001308 }
1309 }
Johannes Berg51cd53a2013-06-12 09:56:51 +02001310
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001311 /* Safely ignore these bits for debug checks below */
1312 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1313
1314 /* HW RF KILL switch toggled */
1315 if (inta & CSR_INT_BIT_RF_KILL) {
Johannes Bergc9eec952012-03-06 13:30:43 -08001316 bool hw_rfkill;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001317
Emmanuel Grumbach8d425512012-03-28 11:00:58 +02001318 hw_rfkill = iwl_is_rfkill_set(trans);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001319 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
Johannes Berg20d3b642012-05-16 22:54:29 +02001320 hw_rfkill ? "disable radio" : "enable radio");
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001321
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001322 isr_stats->rfkill++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001323
Emmanuel Grumbachfa9f3282015-06-11 20:45:49 +03001324 mutex_lock(&trans_pcie->mutex);
Johannes Berg14cfca72014-02-25 20:50:53 +01001325 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
Emmanuel Grumbachfa9f3282015-06-11 20:45:49 +03001326 mutex_unlock(&trans_pcie->mutex);
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001327 if (hw_rfkill) {
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001328 set_bit(STATUS_RFKILL, &trans->status);
1329 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1330 &trans->status))
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001331 IWL_DEBUG_RF_KILL(trans,
1332 "Rfkill while SYNC HCMD in flight\n");
1333 wake_up(&trans_pcie->wait_command_queue);
1334 } else {
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001335 clear_bit(STATUS_RFKILL, &trans->status);
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001336 }
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001337
1338 handled |= CSR_INT_BIT_RF_KILL;
1339 }
1340
1341 /* Chip got too hot and stopped itself */
1342 if (inta & CSR_INT_BIT_CT_KILL) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001343 IWL_ERR(trans, "Microcode CT kill error detected.\n");
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001344 isr_stats->ctkill++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001345 handled |= CSR_INT_BIT_CT_KILL;
1346 }
1347
1348 /* Error detected by uCode */
1349 if (inta & CSR_INT_BIT_SW_ERR) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001350 IWL_ERR(trans, "Microcode SW error detected. "
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001351			"Restarting 0x%X.\n", inta);
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001352 isr_stats->sw++;
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001353 iwl_pcie_irq_handle_error(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001354 handled |= CSR_INT_BIT_SW_ERR;
1355 }
1356
1357 /* uCode wakes up after power-down sleep */
1358 if (inta & CSR_INT_BIT_WAKEUP) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001359 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
Johannes Berg5d63f922014-02-27 11:20:07 +01001360 iwl_pcie_rxq_check_wrptr(trans);
Johannes Bergea68f462014-02-27 14:36:55 +01001361 iwl_pcie_txq_check_wrptrs(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001362
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001363 isr_stats->wakeup++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001364
1365 handled |= CSR_INT_BIT_WAKEUP;
1366 }
1367
1368 /* All uCode command responses, including Tx command responses,
1369 * Rx "responses" (frame-received notification), and other
1370	 * notifications from uCode come through here. */
1371 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
Johannes Berg20d3b642012-05-16 22:54:29 +02001372 CSR_INT_BIT_RX_PERIODIC)) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001373 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001374 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1375 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001376 iwl_write32(trans, CSR_FH_INT_STATUS,
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001377 CSR_FH_INT_RX_MASK);
1378 }
1379 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1380 handled |= CSR_INT_BIT_RX_PERIODIC;
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001381 iwl_write32(trans,
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001382 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001383 }
1384		/* Sending an RX interrupt requires many steps to be done in
1385		 * the device:
1386 * 1- write interrupt to current index in ICT table.
1387 * 2- dma RX frame.
1388 * 3- update RX shared data to indicate last write index.
1389 * 4- send interrupt.
1390		 * This could lead to an RX race: the driver could receive an RX
1391		 * interrupt while the shared data does not yet reflect it; the
1392		 * periodic interrupt will detect any dangling Rx activity.
1393 */
1394
1395 /* Disable periodic interrupt; we use it as just a one-shot. */
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001396 iwl_write8(trans, CSR_INT_PERIODIC_REG,
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001397 CSR_INT_PERIODIC_DIS);
Johannes Berg63791032012-09-06 15:33:42 +02001398
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001399 /*
1400 * Enable periodic interrupt in 8 msec only if we received
1401 * real RX interrupt (instead of just periodic int), to catch
1402 * any dangling Rx interrupt. If it was just the periodic
1403 * interrupt, there was no dangling Rx activity, and no need
1404 * to extend the periodic interrupt; one-shot is enough.
1405 */
1406 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001407 iwl_write8(trans, CSR_INT_PERIODIC_REG,
Johannes Berg20d3b642012-05-16 22:54:29 +02001408 CSR_INT_PERIODIC_ENA);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001409
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001410 isr_stats->rx++;
Johannes Bergf14d6b32014-03-21 13:30:03 +01001411
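		/* Rx frames end up in mac80211 / the network stack, which
		 * expects to run with BHs disabled; the irq thread runs in
		 * process context, hence the explicit BH-disable below. */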
1412 local_bh_disable();
1413 iwl_pcie_rx_handle(trans);
1414 local_bh_enable();
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001415 }
1416
1417 /* This "Tx" DMA channel is used only for loading uCode */
1418 if (inta & CSR_INT_BIT_FH_TX) {
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001419 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001420 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001421 isr_stats->tx++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001422 handled |= CSR_INT_BIT_FH_TX;
1423 /* Wake up uCode load routine, now that load is complete */
Johannes Berg13df1aa2012-03-06 13:31:00 -08001424 trans_pcie->ucode_write_complete = true;
1425 wake_up(&trans_pcie->ucode_write_waitq);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001426 }
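	/* The wake_up() above pairs with a sleeper in the firmware-load
	 * path (in trans.c). A sketch of that waiting side, using the
	 * standard wait_event_timeout() pairing (the exact timeout and
	 * error handling in the driver may differ):
	 *
	 *	trans_pcie->ucode_write_complete = false;
	 *	... start the FH DMA for this ucode chunk ...
	 *	if (!wait_event_timeout(trans_pcie->ucode_write_waitq,
	 *				trans_pcie->ucode_write_complete,
	 *				5 * HZ))
	 *		return -ETIMEDOUT;	(no FH_TX interrupt arrived)
	 */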
1427
1428 if (inta & ~handled) {
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001429 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
Emmanuel Grumbach1f7b6172011-08-25 23:10:59 -07001430 isr_stats->unhandled++;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001431 }
1432
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001433 if (inta & ~(trans_pcie->inta_mask)) {
1434 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1435 inta & ~trans_pcie->inta_mask);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001436 }
1437
1438	/* Re-enable all interrupts, but only if they
1439	 * were disabled by the hard irq handler */
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001440 if (test_bit(STATUS_INT_ENABLED, &trans->status))
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001441 iwl_enable_interrupts(trans);
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001442 /* Re-enable RF_KILL if it occurred */
Stanislaw Gruszka8722c892012-03-07 09:52:28 -08001443 else if (handled & CSR_INT_BIT_RF_KILL)
1444 iwl_enable_rfkill_int(trans);
Johannes Berg2bfb5092012-12-27 21:43:48 +01001445
1446out:
1447 lock_map_release(&trans->sync_cmd_lockdep_map);
1448 return IRQ_HANDLED;
Emmanuel Grumbachab697a92011-07-11 07:35:34 -07001449}
1450
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001451/******************************************************************************
1452 *
1453 * ICT functions
1454 *
1455 ******************************************************************************/
Johannes Berg10667132011-12-19 14:00:59 -08001456
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001457/* Free dram table */
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001458void iwl_pcie_free_ict(struct iwl_trans *trans)
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001459{
Johannes Berg20d3b642012-05-16 22:54:29 +02001460 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001461
Johannes Berg10667132011-12-19 14:00:59 -08001462 if (trans_pcie->ict_tbl) {
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001463 dma_free_coherent(trans->dev, ICT_SIZE,
Johannes Berg10667132011-12-19 14:00:59 -08001464 trans_pcie->ict_tbl,
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001465 trans_pcie->ict_tbl_dma);
Johannes Berg10667132011-12-19 14:00:59 -08001466 trans_pcie->ict_tbl = NULL;
1467 trans_pcie->ict_tbl_dma = 0;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001468 }
1469}
1470
Johannes Berg10667132011-12-19 14:00:59 -08001471/*
1472 * Allocate the shared DRAM table: an aligned memory
1473 * block of ICT_SIZE bytes.
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001474 * Also reset all data related to ICT table interrupts.
1475 */
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001476int iwl_pcie_alloc_ict(struct iwl_trans *trans)
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001477{
Johannes Berg20d3b642012-05-16 22:54:29 +02001478 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001479
Johannes Berg10667132011-12-19 14:00:59 -08001480 trans_pcie->ict_tbl =
Emmanuel Grumbacheef31712013-12-09 09:47:46 +02001481 dma_zalloc_coherent(trans->dev, ICT_SIZE,
Johannes Berg10667132011-12-19 14:00:59 -08001482 &trans_pcie->ict_tbl_dma,
1483 GFP_KERNEL);
1484 if (!trans_pcie->ict_tbl)
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001485 return -ENOMEM;
1486
Johannes Berg10667132011-12-19 14:00:59 -08001487 /* just an API sanity check ... it is guaranteed to be aligned */
1488 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001489 iwl_pcie_free_ict(trans);
Johannes Berg10667132011-12-19 14:00:59 -08001490 return -EINVAL;
1491 }
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001492
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001493 return 0;
1494}
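/*
 * The WARN_ON above uses the usual power-of-two alignment test:
 * addr & (size - 1) is zero iff addr is a multiple of size, provided
 * size is a power of two. A user-space sketch:
 */
#include <stdint.h>
#include <stdio.h>

static int is_pow2_aligned(uint64_t addr, uint64_t size)
{
	return (addr & (size - 1)) == 0;	/* size must be 2^n */
}

int main(void)
{
	printf("%d\n", is_pow2_aligned(0x12345000, 0x1000));	/* 1 */
	printf("%d\n", is_pow2_aligned(0x12345010, 0x1000));	/* 0 */
	return 0;
}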
1495
1496/* The device is going up: inform it that we are using the ICT interrupt
1497 * table, and have the driver side start using ICT interrupts.
1498 */
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001499void iwl_pcie_reset_ict(struct iwl_trans *trans)
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001500{
Johannes Berg20d3b642012-05-16 22:54:29 +02001501 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001502 u32 val;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001503
Johannes Berg10667132011-12-19 14:00:59 -08001504 if (!trans_pcie->ict_tbl)
Emmanuel Grumbached6a3802012-01-02 16:10:08 +02001505 return;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001506
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001507 spin_lock(&trans_pcie->irq_lock);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001508 iwl_disable_interrupts(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001509
Johannes Berg10667132011-12-19 14:00:59 -08001510 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001511
Johannes Berg10667132011-12-19 14:00:59 -08001512 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001513
Eliad Peller18f5a372015-07-16 20:17:42 +03001514 val |= CSR_DRAM_INT_TBL_ENABLE |
1515 CSR_DRAM_INIT_TBL_WRAP_CHECK |
1516 CSR_DRAM_INIT_TBL_WRITE_POINTER;
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001517
Johannes Berg10667132011-12-19 14:00:59 -08001518 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001519
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001520 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001521 trans_pcie->use_ict = true;
1522 trans_pcie->ict_index = 0;
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001523 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001524 iwl_enable_interrupts(trans);
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001525 spin_unlock(&trans_pcie->irq_lock);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001526}
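/*
 * Sketch of the register packing above (assumptions: ICT_SHIFT is
 * log2(ICT_SIZE), i.e. 12 for a 4096-byte table, and the control flags
 * live in the top bits of the register; the flag value used here is
 * illustrative, the real ones are defined in iwl-csr.h):
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_ICT_SHIFT		12		/* assumed log2(ICT_SIZE) */
#define MODEL_TBL_ENABLE	(1U << 31)	/* illustrative flag bit */

int main(void)
{
	uint64_t ict_dma = 0x78654000ULL;	/* table base, size-aligned */
	uint32_t val = (uint32_t)(ict_dma >> MODEL_ICT_SHIFT);

	val |= MODEL_TBL_ENABLE;		/* add the control flags */
	printf("CSR_DRAM_INT_TBL_REG = 0x%08x\n", val);	/* 0x80078654 */
	return 0;
}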
1527
1528/* The device is going down: disable ICT interrupt usage */
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001529void iwl_pcie_disable_ict(struct iwl_trans *trans)
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001530{
Johannes Berg20d3b642012-05-16 22:54:29 +02001531 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001532
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001533 spin_lock(&trans_pcie->irq_lock);
Emmanuel Grumbach0c325762011-08-25 23:10:53 -07001534 trans_pcie->use_ict = false;
Emmanuel Grumbach7b70bd62013-12-11 10:22:28 +02001535 spin_unlock(&trans_pcie->irq_lock);
Emmanuel Grumbach1a361cd2011-07-11 07:44:57 -07001536}
1537
Emmanuel Grumbach85bf9da2013-12-09 11:48:30 +02001538irqreturn_t iwl_pcie_isr(int irq, void *data)
1539{
1540 struct iwl_trans *trans = data;
1541
1542 if (!trans)
1543 return IRQ_NONE;
1544
1545 /* Disable (but don't clear!) interrupts here to avoid
1546 * back-to-back ISRs and sporadic interrupts from our NIC.
1547	 * If we have something to service, the irq thread will re-enable ints.
1548	 * If we *don't*, the irq thread re-enables them before returning.
1549 */
1550 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1551
Emmanuel Grumbacha0f337c2013-12-11 09:00:03 +02001552 return IRQ_WAKE_THREAD;
Emmanuel Grumbach85bf9da2013-12-09 11:48:30 +02001553}
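/*
 * Returning IRQ_WAKE_THREAD defers the real work to the threaded
 * handler, iwl_pcie_irq_handler() above. The pairing between the two is
 * established at setup time with request_threaded_irq(); a sketch of
 * that call (the actual registration lives in trans.c, and the flags
 * and name used here are assumptions):
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler,
 *				   IRQF_SHARED, DRV_NAME, trans);
 */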