/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
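
/*
 * Worked example (illustrative values only, using the constants above and
 * assuming CCMP_MIC_LEN is 8 as in this driver): a frame with
 * byte_cnt = 200, sent with CCMP to station 3, yields
 *
 *	len    = 200 + IWL_TX_CRC_SIZE(4) + IWL_TX_DELIMITER_SIZE(4)
 *		 + CCMP_MIC_LEN(8) = 216
 *	bc_ent = cpu_to_le16((216 & 0xFFF) | (3 << 12)) = 0x30d8
 *
 * i.e. the low 12 bits of each scheduler entry carry the byte count and the
 * top 4 bits the station id, which is why len is asserted to fit in 0xFFF.
 */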

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

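/*
 * A note on the register encoding used above (a sketch, not hardware
 * documentation): HBUS_TARG_WRPTR takes the queue's new write index in its
 * low byte and the queue number shifted into bits 8+, so updating queue 4
 * to index 0x2a amounts to
 *
 *	iwl_write32(trans, HBUS_TARG_WRPTR, 0x2a | (4 << 8));	// 0x042a
 *
 * which is the same encoding iwl_trans_set_wr_ptrs() uses further below.
 */
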
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

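/*
 * Layout sketch of the packing the helpers above implement: each iwl_tfd_tb
 * holds a 36-bit DMA address plus a 12-bit length in 48 bits. tb->lo carries
 * address bits [31:0]; tb->hi_n_len packs address bits [35:32] in its low
 * nibble and the length in its upper 12 bits. E.g. for addr = 0x9ABCD1234
 * and len = 100 (illustrative values):
 *
 *	tb->lo       = cpu_to_le32(0xABCD1234);
 *	tb->hi_n_len = cpu_to_le16((100 << 4) | 0x9);	// 0x0649
 *
 * The double ">> 16" shifts avoid an undefined 32-bit shift when
 * dma_addr_t is itself only 32 bits wide.
 */
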
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	lockdep_assert_held(&txq->lock);

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context.
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb.
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of IWL_NUM_OF_TBS Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

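/*
 * Worked example (illustrative values only): with n_bd = 256 and
 * n_window = 64, read_ptr = 250 and write_ptr = 10 give
 *
 *	s = 250 - 10 = 240;	read_ptr > write_ptr, so s -= 256  -> -16
 *	s <= 0, so s += 64                                         ->  48
 *	s -= 2 (empty/full guard)                                  ->  46
 *
 * i.e. 16 descriptors are outstanding and 46 window slots remain usable
 * after the 2-entry guard, even though the pointers have wrapped around
 * n_bd.
 */
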
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

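/*
 * Sketch of the table update above: each 32-bit word of the scheduler's
 * translation table holds the RA/TID mapping for two queues - the even
 * queue in the low half-word, the odd queue in the high half-word - so
 * only the relevant 16 bits are rewritten. E.g. for txq_id = 11 (odd),
 * the existing low half (queue 10's mapping) is preserved:
 *
 *	tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 */
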
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: the transport layer's private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwl_op_mode_nic_error(trans->op_mode);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					(void *)cmd->data[i],
					cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

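/*
 * Usage sketch (hypothetical command id and payload names, shown only to
 * illustrate the chunk rules enforced above): copied chunks must come
 * first, NOCOPY chunks last, and the copied part must fit in a single TFD:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_FOO,		// hypothetical command id
 *		.flags = CMD_WANT_SKB,
 *		.data = { &fixed_hdr, big_table },
 *		.len = { sizeof(fixed_hdr), big_table_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *
 * The command header plus fixed_hdr are copied into the queue's own buffer
 * and mapped as TFD chunk 0; big_table is DMA-mapped in place as chunk 1.
 */
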
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			"index %d is out of range [0-%d] %d %d.\n", __func__,
			txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->shrd->cmd_queue, sequence,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

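/*
 * Sequence-field sketch: the hdr.sequence of a command response carries the
 * originating queue and descriptor index, packed by the enqueue path above
 * with QUEUE_TO_SEQ()/INDEX_TO_SEQ() and unpacked here with
 * SEQ_TO_QUEUE()/SEQ_TO_INDEX(). E.g. a command sent on queue 4 at
 * write_ptr 23 comes back with
 *
 *	sequence = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(23);
 *	SEQ_TO_QUEUE(sequence) == 4, SEQ_TO_INDEX(sequence) == 23
 *
 * which is how the response is matched back to txq->cmd[]/txq->meta[].
 */
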
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;


	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		return -EIO;
	}
	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans->shrd->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
				"%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			"last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001077}