/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
        } else {
                /* if we're trying to save power */
                if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                        /* wake up nic if it's powered down ...
                         * uCode will wake up, and interrupt us again, so next
                         * time we'll skip this part. */
                        reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(priv,
                                        "Tx queue %d requesting wakeup,"
                                        " GP1 = 0x%x\n", txq_id, reg);
                                iwl_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                return;
                        }

                        iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                        txq->q.write_ptr | (txq_id << 8));

                        /*
                         * else not in power-save mode,
                         * uCode will never sleep when we're
                         * trying to tx (during RFKILL, we're not trying to tx).
                         */
                } else
                        iwl_write32(priv, HBUS_TARG_WRPTR,
                                        txq->q.write_ptr | (txq_id << 8));
        }
        txq->need_update = 0;
}
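
/*
 * Illustrative note (not part of the driver): the value written to
 * HBUS_TARG_WRPTR packs the queue number into bits 8 and up and the TFD
 * write index into the low byte.  For example, advancing queue 4 to
 * write index 42 writes
 *
 *      (42 | (4 << 8)) == 0x042a
 *
 * which is what txq->q.write_ptr | (txq_id << 8) computes above.
 */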

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        dma_addr_t addr = get_unaligned_le32(&tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                addr |=
                ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

        return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                  dma_addr_t addr, u16 len)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        u16 hi_n_len = len << 4;

        put_unaligned_le32(addr, &tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                hi_n_len |= ((addr >> 16) >> 16) & 0xF;

        tb->hi_n_len = cpu_to_le16(hi_n_len);

        tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
        return tfd->num_tbs & 0x1f;
}
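
/*
 * Worked example (illustrative only): each TB entry stores a 36-bit DMA
 * address split across the 32-bit 'lo' word and the low nibble of
 * 'hi_n_len'; the remaining 12 bits of 'hi_n_len' hold the length.
 * Packing addr = 0xA12345678, len = 200 with iwl_tfd_set_tb() gives
 *
 *      tb->lo       = cpu_to_le32(0x12345678)
 *      tb->hi_n_len = cpu_to_le16((200 << 4) | 0xA) == cpu_to_le16(0xc8a)
 *
 * and iwl_tfd_tb_get_addr()/iwl_tfd_tb_get_len() reverse the split.
 */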

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
                             struct iwl_tfd *tfd, int dma_dir)
{
        struct pci_dev *dev = priv->pci_dev;
        int i;
        int num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_tfd_get_num_tbs(tfd);

        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
                /* @todo issue fatal error, it is quite a serious situation */
                return;
        }

        /* Unmap tx_cmd */
        if (num_tbs)
                pci_unmap_single(dev,
                                dma_unmap_addr(meta, mapping),
                                dma_unmap_len(meta, len),
                                PCI_DMA_BIDIRECTIONAL);

        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
                                iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_tfd *tfd_tmp = txq->tfds;
        int index = txq->q.read_ptr;

        iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
                         PCI_DMA_TODEVICE);

        /* free SKB */
        if (txq->txb) {
                struct sk_buff *skb;

                skb = txq->txb[txq->q.read_ptr].skb;

                /* can be called from irqs-disabled context */
                if (skb) {
                        dev_kfree_skb_any(skb);
                        txq->txb[txq->q.read_ptr].skb = NULL;
                }
        }
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len,
                                 u8 reset)
{
        struct iwl_queue *q;
        struct iwl_tfd *tfd, *tfd_tmp;
        u32 num_tbs;

        q = &txq->q;
        tfd_tmp = txq->tfds;
        tfd = &tfd_tmp[q->write_ptr];

        if (reset)
                memset(tfd, 0, sizeof(*tfd));

        num_tbs = iwl_tfd_get_num_tbs(tfd);

        /* Each TFD can point to a maximum of 20 Tx buffers */
        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Error can not send more than %d chunks\n",
                        IWL_NUM_OF_TBS);
                return -EINVAL;
        }

        if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
                return -EINVAL;

        if (unlikely(addr & ~IWL_TX_DMA_MASK))
                IWL_ERR(priv, "Unaligned address = %llx\n",
                        (unsigned long long)addr);

        iwl_tfd_set_tb(tfd, num_tbs, addr, len);

        return 0;
}
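
/*
 * Usage sketch (illustrative, mirrors iwl_enqueue_hcmd() below): a
 * multi-chunk TFD is built by calling the above once per DMA chunk,
 * resetting the TFD on the first call only:
 *
 *      iwlagn_txq_attach_buf_to_tfd(priv, txq, hdr_phys, hdr_len, 1);
 *      iwlagn_txq_attach_buf_to_tfd(priv, txq, buf_phys, buf_len, 0);
 *
 * hdr_phys/buf_phys here are hypothetical pci_map_single() results.
 */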

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        int txq_id = txq->q.id;

        /* Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                        txq->q.dma_addr >> 8);

        return 0;
}
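
/*
 * Note (illustrative): FH_MEM_CBBC_QUEUE takes the TFD ring base shifted
 * right by 8, i.e. the hardware addresses the ring in 256-byte units, so
 * a ring at physical address 0x1fa0000 would be programmed as 0x1fa00.
 * The dma_alloc_coherent() buffer used for txq->tfds is page-aligned and
 * so satisfies this alignment.
 */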

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (q->n_bd == 0)
                return;

        while (q->write_ptr != q->read_ptr) {
                iwlagn_txq_free_tfd(priv, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = &priv->pci_dev->dev;
        int i;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        int i;

        if (q->n_bd == 0)
                return;

        while (q->read_ptr != q->write_ptr) {
                i = get_cmd_index(q, q->read_ptr);

                if (txq->meta[i].flags & CMD_MAPPED) {
                        iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
                                         PCI_DMA_BIDIRECTIONAL);
                        txq->meta[i].flags = 0;
                }

                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct device *dev = &priv->pci_dev->dev;
        int i;

        iwl_cmd_queue_unmap(priv);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
                                  txq->tfds, txq->q.dma_addr);

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
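
/*
 * Illustration (not driver code): with n_bd == 8, read_ptr == 2 and
 * write_ptr == 6, entries 2..5 are owned by the device and 6, 7, 0, 1 are
 * free.  Without the 2-entry reserve, write_ptr == read_ptr would be
 * ambiguous between a completely empty and a completely full ring.
 */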

int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
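
/*
 * Worked example (illustrative): with n_window == 64, read_ptr == 10 and
 * write_ptr == 12, two entries are in flight:
 *
 *      s = 10 - 12 = -2;  s += 64  ->  62;  s -= 2  ->  60
 *
 * i.e. space == n_window - in_flight - reserve.
 */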


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        if (WARN_ON(!is_power_of_2(count)))
                return -EINVAL;

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
                return -EINVAL;

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
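
/*
 * Example values (illustrative): a data queue initialized with
 * slots_num == TFD_TX_CMD_SLOTS (256) gets low_mark == 64 and
 * high_mark == 32; the command queue with TFD_CMD_SLOTS (32) gets
 * low_mark == 8 and high_mark == 4.
 */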

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct device *dev = &priv->pci_dev->dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != priv->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;

        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct iwl_device_cmd);
        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
        if (ret)
                return ret;

        /* Tell device where to find queue */
        iwlagn_tx_queue_init(priv, txq);

        return 0;
err:
        for (i = 0; i < slots_num; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
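
/*
 * Caller sketch (illustrative; the real setup lives in the hw tx context
 * allocation path): the command queue uses TFD_CMD_SLOTS, data queues use
 * TFD_TX_CMD_SLOTS:
 *
 *      slots = (txq_id == priv->cmd_queue) ?
 *                      TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 *      ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots, txq_id);
 */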

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
{
        memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);

        txq->need_update = 0;

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        iwlagn_tx_queue_init(priv, txq);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        u32 idx;
        u16 copy_size, cmd_size;
        bool is_ct_kill = false;
        bool had_nocopy = false;
        int i;
        u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_idx;
#endif

        if (test_bit(STATUS_FW_ERROR, &priv->status)) {
                IWL_WARN(priv, "fw recovery, no hcmd send\n");
                return -EIO;
        }

        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);

        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy))
                                return -EINVAL;
                        copy_size += cmd->len[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
         * allocated into separate TFDs, then we will need to
         * increase the size of the buffers.
         */
        if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
                return -EINVAL;

        if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
                IWL_WARN(priv, "Not sending command - %s KILL\n",
                         iwl_is_rfkill(priv) ? "RF" : "CT");
                return -EIO;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_irqrestore(&priv->hcmd_lock, flags);

                IWL_ERR(priv, "No space in command queue\n");
                is_ct_kill = iwl_check_for_ct_kill(priv);
                if (!is_ct_kill) {
                        IWL_ERR(priv, "Restarting adapter due to queue full\n");
                        iwlagn_fw_error(priv, false);
                }
                return -ENOSPC;
        }

        idx = get_cmd_index(q, q->write_ptr);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
                spin_unlock_irqrestore(&priv->hcmd_lock, flags);
                return -ENOSPC;
        }

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        /* set up the header */

        out_cmd->hdr.cmd = cmd->id;
        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
                                            INDEX_TO_SEQ(q->write_ptr));

        /* and copy the data that needs to be copied */

        cmd_dest = &out_cmd->cmd.payload[0];
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
                memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
                cmd_dest += cmd->len[i];
        }

        IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                        "%d bytes at %d[%d]:%d\n",
                        get_cmd_string(out_cmd->hdr.cmd),
                        out_cmd->hdr.cmd,
                        le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
                        q->write_ptr, idx, priv->cmd_queue);

        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
                                   copy_size, PCI_DMA_BIDIRECTIONAL);
        if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
                idx = -ENOMEM;
                goto out;
        }

        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, copy_size);

        iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_bufs[0] = &out_cmd->hdr;
        trace_lens[0] = copy_size;
        trace_idx = 1;
#endif

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
                        continue;
                phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
                                           cmd->len[i], PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
                        iwlagn_unmap_tfd(priv, out_meta,
                                         &txq->tfds[q->write_ptr],
                                         PCI_DMA_BIDIRECTIONAL);
                        idx = -ENOMEM;
                        goto out;
                }

                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
                trace_bufs[trace_idx] = cmd->data[i];
                trace_lens[trace_idx] = cmd->len[i];
                trace_idx++;
#endif
        }

        out_meta->flags = cmd->flags | CMD_MAPPED;

        txq->need_update = 1;

        /* check that tracing gets all possible blocks */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_iwlwifi_dev_hcmd(priv, cmd->flags,
                               trace_bufs[0], trace_lens[0],
                               trace_bufs[1], trace_lens[1],
                               trace_bufs[2], trace_lens[2]);
#endif

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

 out:
        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return idx;
}
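
/*
 * Caller sketch (illustrative; names are hypothetical): a two-fragment
 * command where a large buffer is handed to the hardware without copying:
 *
 *      struct iwl_host_cmd cmd = {
 *              .id = REPLY_SCAN_CMD,
 *              .len = { sizeof(*scan), scan_data_len },
 *              .data = { scan, scan_data },
 *              .dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *              .flags = CMD_ASYNC,
 *      };
 *      idx = iwl_enqueue_hcmd(priv, &cmd);
 *
 * Per the loop above, NOCOPY fragments must come after all copied ones.
 */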

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                          "is out of range [0-%d] %d %d.\n", txq_id,
                          idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                        q->write_ptr, q->read_ptr);
                        iwlagn_fw_error(priv, false);
                }

        }
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        unsigned long flags;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != priv->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                  txq_id, priv->cmd_queue, sequence,
                  priv->txq[priv->cmd_queue].q.read_ptr,
                  priv->txq[priv->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_page = (unsigned long)rxb_addr(rxb);
                rxb->page = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, pkt);

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        iwl_hcmd_queue_reclaim(priv, txq_id, index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
                               get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }

        /* Mark as unmapped */
        meta->flags = 0;

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
Tomas Winkler17b88922008-05-29 16:35:12 +0800829}