/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-calib.h"
#include "iwl-helpers.h"
/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index.  If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
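/*
 * A worked example of the index arithmetic above (illustrative only, not
 * part of the original logic): with RX_QUEUE_SIZE = 256, read = 200 and
 * write = 190, iwl_rx_queue_space() below reports (200 - 190) - 2 = 8
 * restockable slots; with read = 5 and write = 250 it wraps to
 * (5 - 250 + 256) - 2 = 9.  The two-slot margin keeps a completely full
 * queue distinguishable from an empty one.
 */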

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

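		/* Device expects a multiple of 8 */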
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 *
 * The RBD stores only the DMA address shifted right by 8 bits, so receive
 * buffers must be 256-byte aligned (and, per the checks in iwl_rx_allocate(),
 * addressable within 36 bits).
 */
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
EXPORT_SYMBOL(iwl_rx_queue_restock);


/**
 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free, a new receive buffer is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
					     PAGE_SIZE << priv->hw_params.rx_page_order,
					     PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwl_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);

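/*
 * iwl_rx_replenish() above allocates with GFP_KERNEL and is therefore only
 * safe in process context (it is normally run from the rx_replenish work
 * item).  iwl_rx_replenish_now() below is the non-sleeping variant: it
 * allocates with GFP_ATOMIC so callers in atomic context (e.g. the Rx
 * tasklet) can top the queue up immediately.
 */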
void iwl_rx_replenish_now(struct iwl_priv *priv)
{
	iwl_rx_allocate(priv, GFP_ATOMIC);

	iwl_rx_queue_restock(priv);
}
EXPORT_SYMBOL(iwl_rx_replenish_now);


/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and, if SKB is set to
 * non NULL, it is unmapped and freed.
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);

int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
err_bd:
	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);

void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}

int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->dma_addr >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

int iwl_rxq_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);

void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)

{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);

void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		IWL_DEBUG_11H(priv,
			"Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&priv->measure_report, report, sizeof(*report));
	priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);


/* Calculate noise level, based on measurements during network silence just
 * before arriving beacon.  This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwl_rx_calc_noise(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info
				= &(priv->statistics.rx.general);
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a =
		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	int bcn_silence_b =
		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	int bcn_silence_c =
		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		priv->last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
			bcn_silence_a, bcn_silence_b, bcn_silence_c,
			priv->last_rx_noise);
}

#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Based on the assumption that all statistics counters are in DWORD.
 * FIXME: This function is for debugging; it does not deal with
 * the case of counters rolling over.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *)&priv->statistics;
	accum_stats = (u32 *)&priv->accum_statistics;
	delta = (u32 *)&priv->delta_statistics;
	max_delta = (u32 *)&priv->max_delta;

	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	priv->accum_statistics.general.temperature =
		priv->statistics.general.temperature;
	priv->accum_statistics.general.temperature_m =
		priv->statistics.general.temperature_m;
	priv->accum_statistics.general.ttl_timestamp =
		priv->statistics.general.ttl_timestamp;
	priv->accum_statistics.tx.tx_power.ant_a =
		priv->statistics.tx.tx_power.ant_a;
	priv->accum_statistics.tx.tx_power.ant_b =
		priv->statistics.tx.tx_power.ant_b;
	priv->accum_statistics.tx.tx_power.ant_c =
		priv->statistics.tx.tx_power.ant_c;
}
#endif

#define REG_RECALIB_PERIOD (60)

/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)
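/*
 * Taken together, the three thresholds above drive the Tx health check in
 * iwl_rx_statistics(): when aggregation is active and fewer than
 * ACK_CNT_RATIO percent of the expected ACKs arrived while more than
 * BA_TIMEOUT_CNT block-ack timeouts were seen in the last statistics
 * interval, the driver forces an RF reset; if no ACKs at all arrived and
 * at least BA_TIMEOUT_MAX timeouts were seen, it escalates to a full
 * firmware reset instead.
 */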

#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
		       struct iwl_rx_mem_buffer *rxb)
{
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int combined_plcp_delta;
	unsigned int plcp_msec;
	unsigned long plcp_received_jiffies;
	int actual_ack_cnt_delta;
	int expected_ack_cnt_delta;
	int ba_timeout_delta;

	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics),
		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);

	change = ((priv->statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->statistics.flag &
		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

#ifdef CONFIG_IWLWIFI_DEBUG
	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
	actual_ack_cnt_delta = le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
		le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
	expected_ack_cnt_delta = le32_to_cpu(
		pkt->u.stats.tx.expected_ack_cnt) -
		le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
	ba_timeout_delta = le32_to_cpu(
		pkt->u.stats.tx.agg.ba_timeout) -
		le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
	if ((priv->agg_tids_count > 0) &&
	    (expected_ack_cnt_delta > 0) &&
	    (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) <
		ACK_CNT_RATIO) &&
	    (ba_timeout_delta > BA_TIMEOUT_CNT)) {
		IWL_DEBUG_RADIO(priv,
			"actual_ack_cnt delta = %d, expected_ack_cnt = %d\n",
			actual_ack_cnt_delta, expected_ack_cnt_delta);

#ifdef CONFIG_IWLWIFI_DEBUG
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
				priv->delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
			"ack_or_ba_timeout_collision delta = %d\n",
			priv->delta_statistics.tx.ack_or_ba_timeout_collision);
#endif
		IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
				ba_timeout_delta);
		if ((actual_ack_cnt_delta == 0) &&
		    (ba_timeout_delta >= BA_TIMEOUT_MAX)) {
			IWL_DEBUG_RADIO(priv,
				"call iwl_force_reset(IWL_FW_RESET)\n");
			iwl_force_reset(priv, IWL_FW_RESET);
		} else {
			IWL_DEBUG_RADIO(priv,
				"call iwl_force_reset(IWL_RF_RESET)\n");
			iwl_force_reset(priv, IWL_RF_RESET);
		}
	}
	/*
	 * check for plcp_err and trigger radio reset if it exceeds
	 * the plcp error threshold plcp_delta.
	 */
	plcp_received_jiffies = jiffies;
	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
				     (long) priv->plcp_jiffies);
	priv->plcp_jiffies = plcp_received_jiffies;
	/*
	 * check to make sure plcp_msec is not 0 to prevent division
	 * by zero.
	 */
	if (plcp_msec) {
		combined_plcp_delta =
			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
			le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
			le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));

		if ((combined_plcp_delta > 0) &&
		    ((combined_plcp_delta * 100) / plcp_msec) >
			priv->cfg->plcp_delta_threshold) {
			/*
			 * if plcp_err exceed the threshold, the following
			 * data is printed in csv format:
			 *    Text: plcp_err exceeded %d,
			 *    Received ofdm.plcp_err,
			 *    Current ofdm.plcp_err,
			 *    Received ofdm_ht.plcp_err,
			 *    Current ofdm_ht.plcp_err,
			 *    combined_plcp_delta,
			 *    plcp_msec
			 */
			IWL_DEBUG_RADIO(priv, PLCP_MSG,
				priv->cfg->plcp_delta_threshold,
				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
				le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
				le32_to_cpu(
					priv->statistics.rx.ofdm_ht.plcp_err),
				combined_plcp_delta, plcp_msec);

			/*
			 * Reset the RF radio due to the high plcp
			 * error rate
			 */
			iwl_force_reset(priv, IWL_RF_RESET);
		}
	}

	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
EXPORT_SYMBOL(iwl_rx_statistics);

void iwl_reply_statistics(struct iwl_priv *priv,
			  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUG
		memset(&priv->accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->max_delta, 0,
			sizeof(struct iwl_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl_rx_statistics(priv, rxb);
}
EXPORT_SYMBOL(iwl_reply_statistics);

/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
				struct iwl_rx_phy_res *rx_resp)
{
	return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}

#ifdef CONFIG_IWLWIFI_DEBUG
/**
 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
 *
 * You may hack this function to show different aspects of received frames,
 * including selective frame dumps.
 * group100 parameter selects whether to show 1 out of 100 good data frames.
 *    All beacon and probe response frames are printed.
 */
static void iwl_dbg_report_frame(struct iwl_priv *priv,
			struct iwl_rx_phy_res *phy_res, u16 length,
			struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	__le16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	u32 rate_n_flags;
	u32 tsf_low;
	int rssi;

	if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = header->frame_control;
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(phy_res->channel);
	phy_flags = le16_to_cpu(phy_res->phy_flags);
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* signal statistics */
	rssi = iwl_calc_rssi(priv, phy_res);
	tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 *   (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		if (hundred)
			title = "100Frames";
		else if (ieee80211_has_retry(fc))
			title = "Retry";
		else if (ieee80211_is_assoc_resp(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_resp(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_resp(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
		if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
			bitrate = 0;
			WARN_ON_ONCE(1);
		} else {
			bitrate = iwl_rates[rate_idx].ieee / 2;
		}

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 *    but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
				     "len=%u, rssi=%d, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     header->addr3[5], length, rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
}
#endif

/*
 * returns non-zero if packet should be dropped
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
			   struct ieee80211_hdr *hdr,
			   u32 decrypt_res,
			   struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

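		/* fall through if TTAK OK */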
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);

static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}

static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKB. Convert it to
	 * linear SKB for management frames and for data frames requiring
	 * software decryption or software defragmentation. */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here. It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
 out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}

/* This is necessary only for a number of statistics, see the caller. */
static int iwl_is_network_packet(struct iwl_priv *priv,
				 struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (priv->iw_mode) {
	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr3, priv->bssid);
	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr2, priv->bssid);
	default:
		return 1;
	}
}

/* Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwl_rx_reply_rx(struct iwl_priv *priv,
		     struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl4965_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 *	REPLY_RX: physical layer info is in this buffer
	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!priv->last_phy_res[0]) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
		amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwl_translate_rx_status(priv,
				le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
				phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
				le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.rate_idx =
		iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_TSFT;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwl_calc_rssi(priv, phy_res);

	/* Meaningful noise values are available only from beacon statistics,
	 * which are gathered only when associated, and indicate noise
	 * only for the associated network channel ...
	 * Ignore these noise values while scanning (other channels) */
	if (iwl_is_associated(priv) &&
	    !test_bit(STATUS_SCANNING, &priv->status)) {
		rx_status.noise = priv->last_rx_noise;
	} else {
		rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
	}

	/* Reset beacon noise level if not associated. */
	if (!iwl_is_associated(priv))
		priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

#ifdef CONFIG_IWLWIFI_DEBUG
	/* Set "1" to report good data frames in groups of 100 */
	if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
		iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
	iwl_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
		rx_status.signal, rx_status.noise,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	if (iwl_is_network_packet(priv, header)) {
		priv->last_rx_rssi = rx_status.signal;
		priv->last_beacon_time = priv->ucode_beacon_time;
		priv->last_tsf = le64_to_cpu(phy_res->timestamp);
	}

	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				    rxb, &rx_status);
}
EXPORT_SYMBOL(iwl_rx_reply_rx);

/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
			 struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->last_phy_res[0] = 1;
	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
	       sizeof(struct iwl_rx_phy_res));
}
EXPORT_SYMBOL(iwl_rx_reply_rx_phy);