/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  James P. Ketrenos <ipw2100-admin@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the Rx interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free list.
 * + In iwl_rx_replenish (scheduled), if 'processed' != 'read', then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()    Allocates rx_free
 * iwl_rx_replenish()      Replenishes rx_free list from rx_used, and calls
 *                         iwl_rx_queue_restock
 * iwl_rx_queue_restock()  Moves available buffers from rx_free into Rx
 *                         queue, updates firmware pointers, and updates
 *                         the WRITE index.  If insufficient rx_free buffers
 *                         are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()          Detaches iwl_rx_mem_buffers from pool up to the
 *                         READ INDEX, detaching the SKB from the pool.
 *                         Moves the packet buffer from queue to rx_used.
 *                         Calls iwl_rx_queue_restock to refill any empty
 *                         slots.
 * ...
 *
 */
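
/*
 * Editorial note (worked example, not driver code): with the 256-entry
 * RBD circular buffer used by this driver, and the two guard slots kept
 * back by iwl_rx_queue_space() below so that a full queue is not mistaken
 * for an empty one:
 *
 *   - right after iwl_rx_queue_alloc()/iwl_rx_queue_reset(), read == write == 0,
 *     so space = (0 - 0 + 256) - 2 = 254 slots may be restocked;
 *   - once the driver has restocked 254 buffers, write == 254, so
 *     space = (0 - 254 + 256) - 2 = 0 and restocking stops until the
 *     firmware advances the READ index.
 */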

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	u32 reg = 0;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		ret = iwl_grab_nic_access(priv);
		if (ret)
			goto exit_unlock;

		/* Device expects a multiple of 8 */
		iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
				   q->write & ~0x7);
		iwl_release_nic_access(priv);

	/* Else device is assumed to be awake */
	} else
		/* Device expects a multiple of 8 */
		iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);


	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
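
/*
 * Editorial note (illustration, not driver code): both write paths above
 * round the driver's write index down to a multiple of 8 before handing
 * it to the device, since (per the comments above) the device expects the
 * write pointer in multiples of 8.  For example, with a hypothetical
 * q->write == 13:
 *
 *	13 & ~0x7 == 8
 *
 * so the device is told about 8 newly restocked slots, and the remaining
 * 5 are reported only once the index reaches the next multiple of 8.
 */
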
/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

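/*
 * Editorial note (illustration, not driver code): the uCode's read buffer
 * pointer is simply the buffer's DMA address shifted right by 8 bits, so a
 * hypothetical mapping at 0x1a2b3c00 is written into the descriptor as
 * 0x001a2b3c.  Dropping the low 8 bits implies the receive buffers handed
 * to the device are expected to be 256-byte aligned; that alignment
 * requirement is inferred from the shift, not stated in this file.
 */
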
/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
int iwl_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;
	int ret = 0;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if ((write != (rxq->write & ~0x7))
	    || (abs(rxq->write - rxq->read) > 7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		ret = iwl_rx_queue_update_write_ptr(priv, rxq);
	}

	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);


/**
 * iwl_rx_allocate - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free, an SKB is allocated for the slot.
 *
 * The caller, iwl_rx_replenish, also restocks the Rx queue via
 * iwl_rx_queue_restock, and is called as a scheduled work item
 * (except for during initialization).
 */
void iwl_rx_allocate(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);

		/* Alloc a new receive buffer */
		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
				     __GFP_NOWARN | GFP_ATOMIC);
		if (!rxb->skb) {
			if (net_ratelimit())
				printk(KERN_CRIT DRV_NAME
				       ": Can not allocate SKB buffers\n");
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}
		priv->alloc_rxb_skb++;
		list_del(element);

		/* Get physical address of RB/SKB */
		rxb->dma_addr =
			pci_map_single(priv->pci_dev, rxb->skb->data,
				       priv->hw_params.rx_buf_size,
				       PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_allocate);

void iwl_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_rx_allocate(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);


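/*
 * Editorial sketch (not part of this file): iwl_rx_queue_restock() above
 * queues priv->rx_replenish on priv->workqueue when the pool runs low, and
 * that work item is expected to end up calling iwl_rx_replenish().  A
 * minimal wiring in the per-device setup code might look like the
 * following; the iwl_bg_rx_replenish name, the STATUS_EXIT_PENDING check
 * and the priv->mutex locking are assumptions about the surrounding
 * driver, not requirements imposed by this file.
 *
 *	static void iwl_bg_rx_replenish(struct work_struct *data)
 *	{
 *		struct iwl_priv *priv =
 *		    container_of(data, struct iwl_priv, rx_replenish);
 *
 *		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 *			return;
 *
 *		mutex_lock(&priv->mutex);
 *		iwl_rx_replenish(priv);
 *		mutex_unlock(&priv->mutex);
 *	}
 *
 *	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
 */
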
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and, if the SKB is
 * non-NULL, it is unmapped and freed.
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
		}
	}

	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			    rxq->dma_addr);
	rxq->bd = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);

int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
	if (!rxq->bd)
		return -ENOMEM;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);

void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			priv->alloc_rxb_skb--;
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_reset);

int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys + priv->rb_closed_offset) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k (or 8k
	 * when 8K A-MSDUs are enabled), 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

int iwl_rxq_stop(struct iwl_priv *priv)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				  (1 << 24), 1000);
	if (ret < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);