/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

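/*
 * Per-TID Tx FIFO assignment, indexed by 802.11 TID when an aggregation
 * queue is set up (see iwl_tx_agg_start() and iwl_txq_check_empty()):
 * TIDs 0-7 map onto the four EDCA FIFOs; the reserved TIDs 8-15 get no
 * FIFO at all.
 */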
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

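/*
 * Small helpers around pci_{alloc,free}_consistent for the DMA blocks
 * owned by this file: the scheduler byte-count tables (scd_bc_tbls) and
 * the "keep warm" buffer (kw), both allocated in iwl_txq_ctx_reset().
 */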
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

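/*
 * Layout of the HBUS_TARG_WRPTR value written below: the TFD write index
 * sits in the low byte and the Tx queue id is shifted into bits 8 and up.
 */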
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

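/*
 * Worked example for iwl_queue_space(): with n_window = 64, read_ptr = 10
 * and write_ptr = 70, s = 10 - 70 = -60, then s += 64 -> 4; subtracting
 * the 2-entry reserve leaves 2 slots reported as free.
 */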
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;
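	/* e.g. slots_num = 64 yields low_mark = 16 and high_mark = 8 */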

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
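	/* The loop below runs slots_num + 1 times: the extra iteration
	 * (i == slots_num) allocates only the scan-sized slot, and only
	 * for the command queue. */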
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Handle building the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - cnt, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERR(priv, "ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all data frame if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (!iwl_is_monitor_mode(priv) ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
		}
	}

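	/* For QoS data frames the driver, not mac80211, owns the 802.11
	 * sequence counter; the per-<sta,tid> counter read above is only
	 * committed back further below, once the frame is really queued
	 * (see the !ieee80211_has_morefrags() branch). */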
	txq = &priv->txq[txq_id];
	q = &txq->q;
	txq->swq_id = swq_id;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;
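	/* From here on, len_org is only a flag: 1 means pad bytes were added */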

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

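/*
 * Commands are copied into slots pre-allocated by iwl_tx_queue_init();
 * a command flagged CMD_SIZE_HUGE lands in the extra scan-sized slot
 * selected via get_cmd_index().
 */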
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, fix_size);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
		PCI_DMA_BIDIRECTIONAL);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

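/*
 * Aggregation state machine driven by the two functions below together
 * with iwl_txq_check_empty():
 *   start: IWL_AGG_OFF -> IWL_EMPTYING_HW_QUEUE_ADDBA -> IWL_AGG_ON
 *   stop:  IWL_AGG_ON  -> IWL_EMPTYING_HW_QUEUE_DELBA -> IWL_AGG_OFF
 * The intermediate state is skipped when the HW queue is already empty.
 */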
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001304/**
1305 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
1306 *
1307 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1308 * ACK vs. not. This gets sent to mac80211, then to the rate-scaling algorithm.
1309 */
1310static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1311 struct iwl_ht_agg *agg,
1312 struct iwl_compressed_ba_resp *ba_resp)
1313
1314{
1315 int i, sh, ack;
1316 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1317 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1318 u64 bitmap;
1319 int successes = 0;
1320 struct ieee80211_tx_info *info;
1321
1322 if (unlikely(!agg->wait_for_ba)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001323 IWL_ERR(priv, "Received BA when not expected\n");
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001324 return -EINVAL;
1325 }
1326
1327 /* Mark that the expected block-ack response arrived */
1328 agg->wait_for_ba = 0;
Tomas Winklere1623442009-01-27 14:27:56 -08001329	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, seq_ctl);
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001330
1331 /* Calculate shift to align block-ack bits with our Tx window bits */
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001332 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001333	if (sh < 0) /* indices wrapped around the 256-entry queue */
1334 sh += 0x100;
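	/* Worked example: start_idx 10 with a BA window starting at
	 * index 250 gives sh = -240; adding 0x100 (the 256-entry index
	 * space) realigns the shift to 16, the number of slots between
	 * the BA window start and our Tx window start */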
1335
1336	/* shift out block-ack bits that precede our Tx window */
1337 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1338
1339 if (agg->frame_count > (64 - sh)) {
Tomas Winklere1623442009-01-27 14:27:56 -08001340		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size\n");
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001341 return -1;
1342 }
1343
1344 /* check for success or failure according to the
1345 * transmitted bitmap and block-ack bitmap */
1346 bitmap &= agg->bitmap;
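	/* e.g. frames sent at window offsets 0-3 (agg->bitmap 0xF) with
	 * a BA acknowledging offsets 0, 1 and 3 (0xB) leave 0xB here, so
	 * the loop below counts three ACKs and one NACK (offset 2) */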
1347
1348 /* For each frame attempted in aggregation,
1349 * update driver's record of tx frame's status. */
1350 for (i = 0; i < agg->frame_count ; i++) {
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08001351 ack = bitmap & (1ULL << i);
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001352 successes += !!ack;
Tomas Winklere1623442009-01-27 14:27:56 -08001353 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
Abhijeet Kolekarc3056062008-11-12 13:14:08 -08001354 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001355 agg->start_idx + i);
1356 }
1357
1358 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1359 memset(&info->status, 0, sizeof(info->status));
1360 info->flags = IEEE80211_TX_STAT_ACK;
1361 info->flags |= IEEE80211_TX_STAT_AMPDU;
1362 info->status.ampdu_ack_map = successes;
1363 info->status.ampdu_ack_len = agg->frame_count;
1364 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1365
Tomas Winklere1623442009-01-27 14:27:56 -08001366 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001367
1368 return 0;
1369}
1370
1371/**
1372 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1373 *
1374 * Handles block-acknowledge notification from device, which reports success
1375 * of frames sent via aggregation.
1376 */
1377void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1378 struct iwl_rx_mem_buffer *rxb)
1379{
1380 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1381 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001382 struct iwl_tx_queue *txq = NULL;
1383 struct iwl_ht_agg *agg;
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001384 int index;
1385 int sta_id;
1386 int tid;
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001387
1388 /* "flow" corresponds to Tx queue */
1389 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1390
1391 /* "ssn" is start of block-ack Tx window, corresponds to index
1392 * (in Tx queue's circular buffer) of first TFD/frame in window */
1393 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1394
1395 if (scd_flow >= priv->hw_params.max_txq_num) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001396 IWL_ERR(priv,
1397			"scd_flow is bigger than the number of Tx queues\n");
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001398 return;
1399 }
1400
1401 txq = &priv->txq[scd_flow];
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001402 sta_id = ba_resp->sta_id;
1403 tid = ba_resp->tid;
1404 agg = &priv->stations[sta_id].tid[tid].agg;
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001405
1406 /* Find index just before block-ack window */
1407 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
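	/* e.g. scd_ssn 16 on the usual 256-TFD queue gives index 15,
	 * the slot of the frame just before the new BA window */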
1408
1409 /* TODO: Need to get this copy more safely - now good for debug */
1410
Tomas Winklere1623442009-01-27 14:27:56 -08001411 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001412 "sta_id = %d\n",
1413 agg->wait_for_ba,
Johannes Berge1749612008-10-27 15:59:26 -07001414 (u8 *) &ba_resp->sta_addr_lo32,
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001415 ba_resp->sta_id);
Tomas Winklere1623442009-01-27 14:27:56 -08001416 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001417 "%d, scd_ssn = %d\n",
1418 ba_resp->tid,
1419			   le16_to_cpu(ba_resp->seq_ctl),
1420 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1421			   scd_flow,
1422			   ba_resp_scd_ssn);
Tomas Winklere1623442009-01-27 14:27:56 -08001423	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001424 agg->start_idx,
1425 (unsigned long long)agg->bitmap);
1426
1427 /* Update driver's record of ACK vs. not for each frame in window */
1428 iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1429
1430 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1431 * block-ack window (we assume that they've been successfully
1432 * transmitted ... if not, it's too late anyway). */
1433 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1434 /* calculate mac80211 ampdu sw queue to wake */
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001435 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001436 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
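		/* once tfds_in_queue drains to zero (ADDBA) or the queue
		 * pointers meet (DELBA), iwl_txq_check_empty() below can
		 * complete a pending aggregation state transition */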
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001437
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001438 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1439 priv->mac80211_registered &&
1440 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
Johannes Berge4e72fb2009-03-23 17:28:42 +01001441 iwl_wake_queue(priv, txq->swq_id);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001442
1443 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
Emmanuel Grumbach653fa4a2008-06-30 17:23:11 +08001444 }
1445}
1446EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
1447
Helmut Schaa994d31f2008-07-02 12:17:06 +02001448#ifdef CONFIG_IWLWIFI_DEBUG
Tomas Winklera332f8d2008-05-29 16:35:08 +08001449#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
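/* e.g. TX_STATUS_ENTRY(ABORTED) expands to:
 *	case TX_STATUS_FAIL_ABORTED: return "ABORTED";
 */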
1450
1451const char *iwl_get_tx_fail_reason(u32 status)
1452{
1453 switch (status & TX_STATUS_MSK) {
1454 case TX_STATUS_SUCCESS:
1455 return "SUCCESS";
1456 TX_STATUS_ENTRY(SHORT_LIMIT);
1457 TX_STATUS_ENTRY(LONG_LIMIT);
1458 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1459 TX_STATUS_ENTRY(MGMNT_ABORT);
1460 TX_STATUS_ENTRY(NEXT_FRAG);
1461 TX_STATUS_ENTRY(LIFE_EXPIRE);
1462 TX_STATUS_ENTRY(DEST_PS);
1463 TX_STATUS_ENTRY(ABORTED);
1464 TX_STATUS_ENTRY(BT_RETRY);
1465 TX_STATUS_ENTRY(STA_INVALID);
1466 TX_STATUS_ENTRY(FRAG_DROPPED);
1467 TX_STATUS_ENTRY(TID_DISABLE);
1468 TX_STATUS_ENTRY(FRAME_FLUSHED);
1469 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1470 TX_STATUS_ENTRY(TX_LOCKED);
1471 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1472 }
1473
1474 return "UNKNOWN";
1475}
1476EXPORT_SYMBOL(iwl_get_tx_fail_reason);
1477#endif /* CONFIG_IWLWIFI_DEBUG */