/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free will be here */
#include "iwl-agn.h"
#include "iwl-shared.h"

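/**
 * iwl_trans_rx_alloc - allocate the RBD circular buffer and Rx status area
 *
 * Allocates the DMA-coherent ring of Read Buffer Descriptors (RBDs) and
 * the receive-buffer status area that the device updates in DRAM.
 */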
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

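/**
 * iwl_trans_rxq_free_rx_bufs - return all Rx buffers to the rx_used list
 *
 * Unmaps and frees any pages still attached to the Rx pool entries, then
 * queues every pool entry on rxq->rx_used.
 */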
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

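/**
 * iwl_trans_rx_hw_init - configure the Rx DMA channel registers
 *
 * Points the device at the RBD ring and Rx status area in DRAM, then
 * enables Rx DMA with the chosen buffer size, RB timeout and RBD count.
 */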
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

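/**
 * iwl_rx_init - (re)initialize the Rx queue
 *
 * Allocates the queue on first use, recycles all Rx buffers, resets the
 * read/write indexes, replenishes the pool and programs the Rx DMA HW.
 */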
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(priv(trans), rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

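/**
 * iwl_trans_pcie_rx_free - free all Rx queue buffers and DMA resources
 */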
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

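/**
 * iwl_trans_txq_alloc - allocate a Tx queue's host-side structures
 *
 * Allocates the meta/cmd arrays, the per-slot command buffers, the
 * per-TFD driver data (Tx queues only, not the command queue) and the
 * DMA-coherent TFD circular buffer shared with the device.
 */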
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->shrd->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

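/**
 * iwl_trans_txq_init - initialize a Tx queue and tell the HW where it is
 *
 * Resets the queue state, assigns the swq_id for the default queues,
 * initializes the read/write indexes and water marks, and programs the
 * TFD circular buffer base address into the scheduler.
 */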
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, hw_params(priv).tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
	int txq_id;
	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(priv).max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &trans_pcie->scd_bc_tbls,
				hw_params(priv).scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_tx_free(trans(priv));

	return ret;
}

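/**
 * iwl_tx_init - (re)initialize the Tx queues
 *
 * Allocates the Tx context on first use, turns off the scheduler's Tx
 * FIFOs, programs the keep-warm buffer address and inits every queue.
 */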
static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
				      txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_tx_free(trans(priv));
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

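/**
 * iwl_nic_init - low-level NIC initialization
 *
 * Runs APM init, selects VMAIN power, applies the device-specific
 * configuration and sets up the Rx queue and all Tx/command queues.
 */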
static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans(priv));

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_pcie_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}

static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

	if (iwl_is_rfkill(priv->shrd)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(trans(priv));
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans(priv));

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
 * must be called under priv->shrd->lock and with MAC access held
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

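/**
 * iwl_trans_pcie_tx_start - program the Tx scheduler and activate queues
 *
 * Clears the scheduler's context and Tx status SRAM, enables the Tx DMA
 * channels, then maps each queue to its FIFO and marks it active.
 */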
static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
			IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->shrd->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_disable_sync_irq(trans(priv));

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans(priv));

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}

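/**
 * iwl_trans_pcie_get_tx_cmd - get the next free Tx command slot of a queue
 *
 * Returns NULL when the queue is above its high-water mark; otherwise
 * prepares the command header with the queue/TFD-index sequence value.
 */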
static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
						int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}

static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					dma_unmap_addr(out_meta, mapping),
					dma_unmap_len(out_meta, len),
					DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

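/**
 * iwl_trans_pcie_reclaim - reclaim transmitted TFDs up to ssn
 *
 * Frees the TFDs up to the index derived from @ssn, hands the associated
 * skbs back on @skbs, and wakes the queue when enough space is freed.
 */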
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id,
		      int ssn, u32 status, struct sk_buff_head *skbs)
{
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	u8 agg_state;
	bool cond;

	if (txq->sched_retry) {
		agg_state =
			priv->stations[txq->sta_id].tid[txq->tid].agg.state;
		cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
	} else {
		cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
				"scd_ssn=%d idx=%d txq=%d swq=%d\n",
				ssn, tfd_num, txq_id, txq->swq_id);
		iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
			iwl_wake_queue(priv, txq);
	}
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklets */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, trans(priv));
	iwl_free_isr_ict(trans(priv));
	kfree(trans(priv));
	trans(priv) = NULL;
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend state;
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first, but since iwl_mac_stop() has no knowledge of who the caller
	 * is, it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan)
		iwl_apm_stop(priv(trans));

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);

	return 0;
}
#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
		trans_pcie->trans = iwl_trans;
	}

	return iwl_trans;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);


static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

1322static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1323 char __user *user_buf,
1324 size_t count, loff_t *ppos)
1325{
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001326 struct iwl_trans *trans = file->private_data;
1327 struct iwl_priv *priv = priv(trans);
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001328 int pos = 0, ofs = 0;
1329 int cnt = 0, entry;
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001330 struct iwl_trans_pcie *trans_pcie =
1331 IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001332 struct iwl_tx_queue *txq;
1333 struct iwl_queue *q;
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001334 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001335 char *buf;
1336 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
1337 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
1338 const u8 *ptr;
1339 ssize_t ret;
1340
1341 if (!priv->txq) {
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001342 IWL_ERR(trans, "txq not ready\n");
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001343 return -EAGAIN;
1344 }
1345 buf = kzalloc(bufsz, GFP_KERNEL);
1346 if (!buf) {
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001347 IWL_ERR(trans, "Can not allocate buffer\n");
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001348 return -ENOMEM;
1349 }
1350 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001351 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001352 txq = &priv->txq[cnt];
1353 q = &txq->q;
1354 pos += scnprintf(buf + pos, bufsz - pos,
1355 "q[%d]: read_ptr: %u, write_ptr: %u\n",
1356 cnt, q->read_ptr, q->write_ptr);
1357 }
1358 if (priv->tx_traffic &&
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001359 (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001360 ptr = priv->tx_traffic;
1361 pos += scnprintf(buf + pos, bufsz - pos,
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001362 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001363 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1364 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1365 entry++, ofs += 16) {
1366 pos += scnprintf(buf + pos, bufsz - pos,
1367 "0x%.4x ", ofs);
1368 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1369 buf + pos, bufsz - pos, 0);
1370 pos += strlen(buf + pos);
1371 if (bufsz - pos > 0)
1372 buf[pos++] = '\n';
1373 }
1374 }
1375 }
1376
1377 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
1378 pos += scnprintf(buf + pos, bufsz - pos,
1379 "read: %u, write: %u\n",
1380 rxq->read, rxq->write);
1381
1382 if (priv->rx_traffic &&
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001383 (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001384 ptr = priv->rx_traffic;
1385 pos += scnprintf(buf + pos, bufsz - pos,
Emmanuel Grumbach5a878bf2011-08-25 23:10:51 -07001386 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
Emmanuel Grumbach87e56662011-08-25 23:10:50 -07001387 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1388 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1389 entry++, ofs += 16) {
1390 pos += scnprintf(buf + pos, bufsz - pos,
1391 "0x%.4x ", ofs);
1392 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1393 buf + pos, bufsz - pos, 0);
1394 pos += strlen(buf + pos);
1395 if (bufsz - pos > 0)
1396 buf[pos++] = '\n';
1397 }
1398 }
1399 }
1400
1401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1402 kfree(buf);
1403 return ret;
1404}

static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int traffic_log;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &traffic_log) != 1)
		return -EINVAL;
	if (traffic_log == 0)
		iwl_reset_traffic_log(priv(trans));

	return count;
}
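
/*
 * Example userspace interaction (the exact path depends on where the
 * caller of iwl_trans_pcie_dbgfs_register() created the parent directory):
 *
 *	cat /sys/kernel/debug/<dir>/traffic_log      # dump queues + traffic
 *	echo 0 > /sys/kernel/debug/<dir>/traffic_log # reset the traffic log
 */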

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 *
				priv->cfg->base_params->num_of_queues;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		/* the low two bits of swq_id encode the AC, the rest the hw queue */
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, priv->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				"\tstop-count: %d\n",
				atomic_read(&priv->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
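
/*
 * Illustrative tx_queue output line (values made up; swq_id 0x05 decodes
 * as ac = 0x05 & 3 = 1, hwq = 0x05 >> 2 = 1):
 *
 *	hwq 01: read=12 write=12 stop=0 swq_id=0x05 (ac 1/hwq 1)
 *		stop-count: 0
 */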

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
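
/*
 * Illustrative rx_queue output (values made up):
 *
 *	read: 12
 *	write: 37
 *	free_count: 215
 *	closed_rb_num: 37
 */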

static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret;

	ret = pos = iwl_dump_nic_event_log(priv(trans), true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EINVAL;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(priv(trans), true, NULL, false);

	return count;
}
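
/*
 * Example: "echo 1 > <dir>/log_event" asks iwl_dump_nic_event_log() to
 * dump the uCode event log with a NULL buffer (presumably straight to the
 * kernel log), while "cat <dir>/log_event" returns the dump as text.
 */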

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EINVAL;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
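
/*
 * Example: "echo 0 > <dir>/interrupt" (the value is parsed as hex) clears
 * all interrupt statistics; "cat <dir>/interrupt" prints the report.
 */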

DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,

	.rx_free = iwl_trans_pcie_rx_free,
	.tx_free = iwl_trans_pcie_tx_free,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
	.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
};
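
/*
 * A minimal sketch of how this ops table is consumed; the actual inline
 * wrappers live in iwl-trans.h, so the exact name here is illustrative:
 *
 *	static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
 *						   struct dentry *dir)
 *	{
 *		return trans->ops->dbgfs_register(trans, dir);
 *	}
 */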