/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer tx_free lives here */
#include "iwl-agn.h"
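
/**
 * iwl_trans_rx_alloc - allocate the Rx queue's DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and
 * the buffer in which the device reports its Rx status, both shared
 * with the device via coherent DMA.
 */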
static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus.dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
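
/**
 * iwl_trans_rxq_free_rx_bufs - return all Rx buffers to the rx_used list
 *
 * Unmaps and frees any page still attached to a pool entry, then queues
 * every pool entry on rx_used.
 */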
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
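
/**
 * iwl_trans_rx_hw_init - program the Rx DMA channel for this Rx queue
 */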
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
	 * the credit mechanism of the 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
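
/**
 * iwl_trans_rx_init - initialize the Rx queue and hand it to the device
 *
 * Allocates the queue on first use, recycles all buffers onto rx_used,
 * replenishes fresh ones and programs the Rx DMA hardware.
 */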
static int iwl_trans_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(priv);

	iwl_trans_rx_hw_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
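
/**
 * iwl_trans_rx_free - free everything iwl_trans_rx_init allocated
 *
 * Safe to call even if nothing was ever allocated.
 */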
static void iwl_trans_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, nothing has been allocated, exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus.dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus.dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
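
/**
 * iwl_trans_rx_stop - stop the Rx DMA channel and wait for it to go idle
 */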
static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
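
/**
 * iwlagn_alloc_dma_ptr - allocate a coherent DMA buffer described by @ptr
 */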
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus.dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
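
/**
 * iwlagn_free_dma_ptr - free a buffer obtained from iwlagn_alloc_dma_ptr
 */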
static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus.dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
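
/**
 * iwl_trans_txq_alloc - allocate one Tx queue
 *
 * Allocates the command and meta arrays, the per-TFD driver data (for
 * Tx queues other than the command queue) and the TFD circular buffer
 * that is shared with the device.
 */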
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			       int slots_num, u32 txq_id)
{
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus.dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}
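
/**
 * iwl_trans_txq_init - initialize an allocated Tx queue
 *
 * Resets the queue state and tells the device where to find the queue's
 * TFD circular buffer.
 */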
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus.dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			    priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	trans_tx_free(priv);

	return ret;
}
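
/**
 * iwl_trans_tx_init - initialize all Tx queues and the keep-warm buffer
 *
 * Allocates the Tx context on first use; on error, frees only what was
 * allocated here.
 */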
static int iwl_trans_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		trans_tx_free(priv);
	return ret;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and with MAC access held
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};
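
/**
 * iwl_trans_tx_start - program the Tx scheduler and FH DMA channels
 *
 * Clears the scheduler SRAM, enables the DMA channels, activates every
 * Tx queue and maps the queues to FIFOs using the tables above.
 */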
static void iwl_trans_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]\n", ch,
				iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}
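
/**
 * iwl_trans_stop_device - stop the device and put it in a low power state
 */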
static void iwl_trans_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	trans_sync_irq(priv);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}
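
/**
 * iwl_trans_get_tx_cmd - grab the next free Tx command slot of a Tx queue
 *
 * Returns NULL when the queue has (almost) no space left.
 */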
static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
					       int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}
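
/**
 * iwl_trans_tx - build and queue a TFD for one frame
 *
 * Maps the Tx command + MAC header and the frame payload, attaches both
 * to a TFD and advances the queue write pointer.
 */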
static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus.dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus.dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus.dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
			dma_unmap_single(priv->bus.dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus.dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus.dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}
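
/**
 * iwl_trans_sync_irq - wait for the ISR and its tasklet to finish running
 */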
static void iwl_trans_sync_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->bus.irq);
	tasklet_kill(&priv->irq_tasklet);
}
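
/**
 * iwl_trans_free - release the interrupt and the ICT table
 */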
static void iwl_trans_free(struct iwl_priv *priv)
{
	free_irq(priv->bus.irq, priv);
	iwl_free_isr_ict(priv);
}

static const struct iwl_trans_ops trans_ops = {
	.rx_init = iwl_trans_rx_init,
	.rx_free = iwl_trans_rx_free,

	.tx_init = iwl_trans_tx_init,
	.tx_start = iwl_trans_tx_start,
	.tx_free = iwl_trans_tx_free,

	.stop_device = iwl_trans_stop_device,

	.send_cmd = iwl_send_cmd,
	.send_cmd_pdu = iwl_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_get_tx_cmd,
	.tx = iwl_trans_tx,

	.sync_irq = iwl_trans_sync_irq,
	.free = iwl_trans_free,
};
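
/**
 * iwl_trans_register - attach the transport ops and set up interrupts
 *
 * Allocates the ICT table, requests the bus IRQ and initializes the IRQ
 * tasklet and the Rx replenish work.
 */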
int iwl_trans_register(struct iwl_priv *priv)
{
	int err;

	priv->trans.ops = &trans_ops;

	iwl_alloc_isr_ict(priv);

	err = request_irq(priv->bus.irq, iwl_isr_ict, IRQF_SHARED,
			  DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus.irq);
		iwl_free_isr_ict(priv);
		return err;
	}

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);

	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);

	return 0;
}