/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
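
/*
 * Editorial note: entries 0-7 above are the 802.11e QoS TIDs mapped onto the
 * four EDCA FIFOs (here AC0 appears to carry background, AC1 best effort,
 * AC2 video and AC3 voice, matching the usual TID-to-AC pairing); TIDs 8-15
 * map to no FIFO, and the final entry covers the non-QoS case.  The table is
 * consumed by iwl_tx_agg_start() near the end of this file:
 *
 *	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
 *		tx_fifo = default_tid_to_tx_fifo[tid];
 *	else
 *		return -EINVAL;
 */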

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
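
/*
 * Worked example (editorial): HBUS_TARG_WRPTR takes the write index in its
 * low byte and the queue number shifted into bits 8 and up, so for queue 4
 * with write_ptr 17 the value written above is 17 | (4 << 8) = 0x0411.
 */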

void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
EXPORT_SYMBOL(iwl_free_tfds_in_queue);
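
/*
 * Editorial note: tfds_in_queue is incremented once per queued QoS data
 * frame in iwl_tx_skb() and decremented here as frames are reclaimed; the
 * clamp to 0 above is defensive, since freeing more TFDs than were counted
 * indicates a bookkeeping bug rather than a recoverable state.
 */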

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped.  When reclaiming packets (on a "Tx done" IRQ), if the free space
 * rises above the high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
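
/*
 * Worked example (editorial): with n_window = 64, an empty queue
 * (read_ptr == write_ptr) gives s = 0, then s += 64 and s -= 2, reporting
 * 62 free slots -- the 2-entry reserve described above.  With read_ptr = 0
 * and write_ptr = 5 (five TFDs in flight), s = -5 + 64 - 2 = 57.
 */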

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
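
/*
 * Worked example (editorial): a normal Tx queue with slots_num = 64 gets
 * low_mark = 16 and high_mark = 8; the floors of 4 and 2 only matter for
 * small windows (e.g. slots_num = 8 would clamp low_mark from 2 up to 4).
 */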

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Aggregation TX queues will get their ID when aggregation begins;
	 * they overwrite the setting done here. The command FIFO doesn't
	 * need an swq_id so don't set one to catch errors, all others can
	 * be set up to the identity mapping.
	 */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
Ron Rindjunsky1053d352008-05-05 10:22:43 +0800441/**
442 * iwl_txq_ctx_reset - Reset TX queue context
Tomas Winklera96a27f2008-10-23 23:48:56 -0700443 * Destroys all DMA structures and initialize them again
Ron Rindjunsky1053d352008-05-05 10:22:43 +0800444 *
445 * @param priv
446 * @return error code
447 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * handle build REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
			  RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
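	/*
	 * Editorial note: in 5 GHz mac80211's rate index 0 is the first OFDM
	 * rate, while the driver's iwl_rates[] table also contains the CCK
	 * rates ahead of it, so adding IWL_FIRST_OFDM_RATE skips the CCK
	 * entries (mac80211 index 0 becomes driver index IWL_FIRST_OFDM_RATE).
	 */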
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}
Harvey Harrison7294ec92008-07-15 18:43:59 -0700777 hdr_len = ieee80211_hdrlen(fc);
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800778
779 /* Find (or create) index into station table for destination station */
Gábor Stefanikaa065262009-08-21 20:44:09 +0200780 if (info->flags & IEEE80211_TX_CTL_INJECTED)
781 sta_id = priv->hw_params.bcast_sta_id;
782 else
783 sta_id = iwl_get_sta_id(priv, hdr);
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800784 if (sta_id == IWL_INVALID_STATION) {
Tomas Winklere1623442009-01-27 14:27:56 -0800785 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
Johannes Berge1749612008-10-27 15:59:26 -0700786 hdr->addr1);
Johannes Berg3995bd92009-07-24 11:13:14 -0700787 goto drop_unlock;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800788 }
789
Tomas Winklere1623442009-01-27 14:27:56 -0800790 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800791
Johannes Berg6ab10ff2009-11-13 11:56:37 -0800792 if (sta)
793 sta_priv = (void *)sta->drv_priv;
794
795 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
796 sta_priv->asleep) {
797 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
798 /*
799 * This sends an asynchronous command to the device,
800 * but we can rely on it being processed before the
801 * next frame is processed -- and the next frame to
802 * this station is the one that will consume this
803 * counter.
804 * For now set the counter to just 1 since we do not
805 * support uAPSD yet.
806 */
807 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
808 }
809
Johannes Berg45af8192009-06-19 13:52:43 -0700810 txq_id = skb_get_queue_mapping(skb);
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -0700811 if (ieee80211_is_data_qos(fc)) {
812 qc = ieee80211_get_qos_ctl(hdr);
Harvey Harrison7294ec92008-07-15 18:43:59 -0700813 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
Reinette Chatree6a6cf42009-08-13 13:30:50 -0700814 if (unlikely(tid >= MAX_TID_COUNT))
815 goto drop_unlock;
Tomas Winklerf3674222008-08-04 16:00:44 +0800816 seq_number = priv->stations[sta_id].tid[tid].seq_number;
817 seq_number &= IEEE80211_SCTL_SEQ;
818 hdr->seq_ctrl = hdr->seq_ctrl &
Harvey Harrisonc1b4aa32009-01-29 13:26:44 -0800819 cpu_to_le16(IEEE80211_SCTL_FRAG);
Tomas Winklerf3674222008-08-04 16:00:44 +0800820 hdr->seq_ctrl |= cpu_to_le16(seq_number);
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800821 seq_number += 0x10;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800822 /* aggregation is on for this <sta,tid> */
Johannes Berg45af8192009-06-19 13:52:43 -0700823 if (info->flags & IEEE80211_TX_CTL_AMPDU)
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800824 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800825 }
826
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800827 txq = &priv->txq[txq_id];
Johannes Berg45af8192009-06-19 13:52:43 -0700828 swq_id = txq->swq_id;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800829 q = &txq->q;
830
Johannes Berg3995bd92009-07-24 11:13:14 -0700831 if (unlikely(iwl_queue_space(q) < q->high_mark))
832 goto drop_unlock;
833
834 if (ieee80211_is_data_qos(fc))
835 priv->stations[sta_id].tid[tid].tfds_in_queue++;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800836
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800837 /* Set up driver data for this TFD */
838 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
839 txq->txb[q->write_ptr].skb[0] = skb;
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800840
841 /* Set up first empty entry in queue's array of Tx/cmd buffers */
Tomas Winklerb88b15d2008-10-14 12:32:49 -0700842 out_cmd = txq->cmd[q->write_ptr];
Johannes Bergc2acea82009-07-24 11:13:05 -0700843 out_meta = &txq->meta[q->write_ptr];
Tomas Winklerfd4abac2008-05-15 13:54:07 +0800844 tx_cmd = &out_cmd->cmd.tx;
845 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
846 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
847
848 /*
849 * Set up the Tx-command (not MAC!) header.
850 * Store the chosen Tx queue and TFD index within the sequence field;
851 * after Tx, uCode's Tx response will return this value so driver can
852 * locate the frame within the tx queue and do post-tx processing.
853 */
854 out_cmd->hdr.cmd = REPLY_TX;
855 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
856 INDEX_TO_SEQ(q->write_ptr)));
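	/*
	 * Worked example (editorial, assuming the usual layout of
	 * QUEUE_TO_SEQ/INDEX_TO_SEQ: queue number in the second byte, TFD
	 * index in the low byte): queue 2 at write index 5 packs to 0x0205;
	 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() in iwl_tx_cmd_complete() undo this.
	 */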

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;
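	/*
	 * Worked example (editorial): "(len + 3) & ~3" rounds up to the next
	 * dword boundary, e.g. len = 78 yields firstlen = 80; len_org is then
	 * reused as a flag (1 here) recording that 2 pad bytes were added.
	 */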

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries.
	 * Also, check that the command buffer does not exceed the size
	 * of device_cmd and the maximum command size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
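	/*
	 * Editorial note: "huge" commands (CMD_SIZE_HUGE) live in the one
	 * extra slot allocated for the command queue in iwl_tx_queue_init();
	 * get_cmd_index() maps them to index TFD_CMD_SLOTS, which is why len
	 * is bumped to IWL_MAX_CMD_SIZE for that index further down.
	 */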

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwl_tx_status(priv, tx_info->skb[0]);

		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding a queue for aggregation.
 * Should never return anything < 7, because queues 0-6 should already
 * be in use as EDCA ACs (0-3), Command (4), and HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

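/*
 * Aggregation state flow implemented by iwl_tx_agg_start(),
 * iwl_tx_agg_stop() and iwl_txq_check_empty() below (a summary of this
 * file's logic, not an external spec):
 *
 *	IWL_AGG_OFF --start, HW queue empty--------------> IWL_AGG_ON
 *	IWL_AGG_OFF --start, frames pending--> IWL_EMPTYING_HW_QUEUE_ADDBA
 *		--last frame reclaimed--> IWL_AGG_ON
 *	IWL_AGG_ON --stop, HW queue empty----------------> IWL_AGG_OFF
 *	IWL_AGG_ON --stop, frames pending--> IWL_EMPTYING_HW_QUEUE_DELBA
 *		--last frame reclaimed--> IWL_AGG_OFF
 */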
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF!\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
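
/*
 * A minimal sketch of a typical caller, assuming the usual mac80211
 * ampdu_action plumbing (illustrative only; the real callback lives in
 * the per-device code, not in this file):
 *
 *	case IEEE80211_AMPDU_TX_START:
 *		return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
 *	case IEEE80211_AMPDU_TX_STOP:
 *		return iwl_tx_agg_stop(priv, sta->addr, tid);
 */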

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state ==
				IWL_EMPTYING_HW_QUEUE_ADDBA) {
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		return 0;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

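/*
 * Worked example for the shift logic below (numbers chosen only for
 * illustration): if the driver's Tx window starts at start_idx = 5 and
 * the BA's seq_ctl decodes to index 3, then sh = 2 and the 64-bit BA
 * bitmap is shifted right by 2 so that its bit 0 lines up with the frame
 * at start_idx.  ANDing with agg->bitmap (the frames actually sent in
 * the aggregate) leaves a 1 only for frames that were both transmitted
 * and ACK'd.
 */
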
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through the block-ack's bitmap of ACK'd frames and update the driver's
 * record of ACK vs. not for each. This gets reported to mac80211, then to
 * the rate scaling algorithm.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size\n");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		/* !! avoids truncating the u64 bit test to int */
		ack = !!(bitmap & (1ULL << i));
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles the block-acknowledge notification from the device, which reports
 * the success of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
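
/*
 * Worked example for the reclaim above (illustrative numbers): with
 * read_ptr = 10 and scd_ssn = 13, the BA window now starts at entry 13,
 * so entries 10, 11 and 12 sit wholly in front of it.  index is then the
 * entry just before the window (12), iwl_tx_queue_reclaim() frees entries
 * up to and including it, and the queue is woken if more than low_mark
 * entries opened up.
 */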

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
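
/*
 * For example, TX_STATUS_ENTRY(ABORTED); in the switch below expands to:
 *
 *	case TX_STATUS_FAIL_ABORTED: return "ABORTED";
 *
 * mapping each TX_STATUS_FAIL_* code to its printable name.
 */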

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_ENTRY(SHORT_LIMIT);
	TX_STATUS_ENTRY(LONG_LIMIT);
	TX_STATUS_ENTRY(FIFO_UNDERRUN);
	TX_STATUS_ENTRY(MGMNT_ABORT);
	TX_STATUS_ENTRY(NEXT_FRAG);
	TX_STATUS_ENTRY(LIFE_EXPIRE);
	TX_STATUS_ENTRY(DEST_PS);
	TX_STATUS_ENTRY(ABORTED);
	TX_STATUS_ENTRY(BT_RETRY);
	TX_STATUS_ENTRY(STA_INVALID);
	TX_STATUS_ENTRY(FRAG_DROPPED);
	TX_STATUS_ENTRY(TID_DISABLE);
	TX_STATUS_ENTRY(FRAME_FLUSHED);
	TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
	TX_STATUS_ENTRY(TX_LOCKED);
	TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */