/*
        Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00pci
        Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Register access.
 */
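/*
 * rt2x00pci_regbusy_read - wait for an indirect register access to complete.
 * Polls the given busy field at @offset, re-reading the register up to
 * REGISTER_BUSY_COUNT times with a REGISTER_BUSY_DELAY pause in between.
 * Returns 1 once the busy field clears, or 0 when the device is no longer
 * present or the register stays busy (in which case *reg is set to ~0).
 */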
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
                           const unsigned int offset,
                           const struct rt2x00_field32 field,
                           u32 *reg)
{
        unsigned int i;

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return 0;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                rt2x00pci_register_read(rt2x00dev, offset, reg);
                if (!rt2x00_get_field32(*reg, field))
                        return 1;
                udelay(REGISTER_BUSY_DELAY);
        }

        ERROR(rt2x00dev, "Indirect register access failed: "
              "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
        *reg = ~0;

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

/*
 * TX data handlers.
 */
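/*
 * rt2x00pci_write_tx_data - prepare an skb for DMA-based TX.
 * Verifies that the hardware no longer owns the queue entry, reserves and
 * clears the driver's extra TX headroom in front of the frame, lets the
 * driver fill its data descriptor, and maps the skb for DMA when the
 * driver has set DRIVER_REQUIRE_DMA.
 */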
int rt2x00pci_write_tx_data(struct queue_entry *entry,
                            struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        /*
         * This should not happen; we already checked that the entry
         * was ours. If the hardware disagrees, the queue has been
         * corrupted.
         */
        if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
                ERROR(rt2x00dev,
                      "Corrupt queue %d, accessing an entry which is not ours.\n"
                      "Please file a bug report to %s.\n",
                      entry->queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the requested extra tx headroom in front of the skb.
         */
        skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
        memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

        /*
         * Call the driver's write_tx_datadesc function, if it exists.
         */
        if (rt2x00dev->ops->lib->write_tx_datadesc)
                rt2x00dev->ops->lib->write_tx_datadesc(entry, txdesc);

        /*
         * Map the skb to DMA.
         */
        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
                rt2x00queue_map_txskb(rt2x00dev, entry->skb);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
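/*
 * rt2x00pci_txdone - complete a transmitted frame.
 * Unmaps the skb, strips the extra TX headroom again, clears the
 * descriptor-in-skb flag and hands the entry to rt2x00lib_txdone()
 * for further status handling.
 */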
void rt2x00pci_txdone(struct queue_entry *entry,
                      struct txdone_entry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        /*
         * Unmap the skb.
         */
        rt2x00queue_unmap_skb(rt2x00dev, entry->skb);

        /*
         * Remove the extra tx headroom from the skb.
         */
        skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);

        /*
         * Signal that the TX descriptor is no longer in the skb.
         */
        skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

        /*
         * Pass on to rt2x00lib.
         */
        rt2x00lib_txdone(entry, txdesc);
}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

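/*
 * rt2x00pci_rxdone - process received frames.
 * Walks the RX queue from the current Q_INDEX entry until an entry that is
 * still owned by the hardware is found; every completed entry gets its skb
 * descriptor filled in and is passed to rt2x00lib_rxdone().
 */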
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue = rt2x00dev->rx;
        struct queue_entry *entry;
        struct queue_entry_priv_pci *entry_priv;
        struct skb_frame_desc *skbdesc;

        while (1) {
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                entry_priv = entry->priv_data;

                if (rt2x00dev->ops->lib->get_entry_state(entry))
                        break;

                /*
                 * Fill in the descriptor fields of the skb descriptor.
                 */
                skbdesc = get_skb_frame_desc(entry->skb);
                skbdesc->desc = entry_priv->desc;
                skbdesc->desc_len = entry->queue->desc_size;

                /*
                 * Send the frame to rt2x00lib for further processing.
                 */
                rt2x00lib_rxdone(rt2x00dev, entry);
        }
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

/*
 * Device initialization handlers.
 */
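/*
 * rt2x00pci_alloc_queue_dma - allocate the descriptor DMA memory of a queue.
 * One coherent block of queue->limit * queue->desc_size bytes is allocated
 * and every queue entry is pointed at its own slice of it (both the CPU
 * address and the DMA address).
 */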
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
                                     struct data_queue *queue)
{
        struct queue_entry_priv_pci *entry_priv;
        void *addr;
        dma_addr_t dma;
        unsigned int i;

        /*
         * Allocate DMA memory for descriptor and buffer.
         */
        addr = dma_alloc_coherent(rt2x00dev->dev,
                                  queue->limit * queue->desc_size,
                                  &dma, GFP_KERNEL | GFP_DMA);
        if (!addr)
                return -ENOMEM;

        memset(addr, 0, queue->limit * queue->desc_size);

        /*
         * Initialize all queue entries to contain valid addresses.
         */
        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                entry_priv->desc = addr + i * queue->desc_size;
                entry_priv->desc_dma = dma + i * queue->desc_size;
        }

        return 0;
}

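/*
 * rt2x00pci_free_queue_dma - release the descriptor DMA memory of a queue.
 * The coherent block is addressed through the first queue entry, so freeing
 * it once releases the descriptors of all entries.
 */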
static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
                                     struct data_queue *queue)
{
        struct queue_entry_priv_pci *entry_priv =
            queue->entries[0].priv_data;

        if (entry_priv->desc)
                dma_free_coherent(rt2x00dev->dev,
                                  queue->limit * queue->desc_size,
                                  entry_priv->desc, entry_priv->desc_dma);
        entry_priv->desc = NULL;
}

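/*
 * rt2x00pci_initialize - set up DMA and the interrupt line.
 * Allocates descriptor memory for every data queue and registers the
 * driver's (shared) interrupt handler; on failure all queue allocations
 * are rolled back.
 */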
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        /*
         * Allocate DMA memory for all queues.
         */
        queue_for_each(rt2x00dev, queue) {
                status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
                if (status)
                        goto exit;
        }

        /*
         * Register interrupt handler.
         */
        status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler,
                             IRQF_SHARED, rt2x00dev->name, rt2x00dev);
        if (status) {
                ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
                      rt2x00dev->irq, status);
                goto exit;
        }

        return 0;

exit:
        queue_for_each(rt2x00dev, queue)
                rt2x00pci_free_queue_dma(rt2x00dev, queue);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

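/*
 * rt2x00pci_uninitialize - undo rt2x00pci_initialize().
 */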
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * Free the IRQ line.
         */
        free_irq(rt2x00dev->irq, rt2x00dev);

        /*
         * Free the DMA memory of all queues.
         */
        queue_for_each(rt2x00dev, queue)
                rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rf);
        rt2x00dev->rf = NULL;

        kfree(rt2x00dev->eeprom);
        rt2x00dev->eeprom = NULL;

        if (rt2x00dev->csr.base) {
                iounmap(rt2x00dev->csr.base);
                rt2x00dev->csr.base = NULL;
        }
}

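/*
 * rt2x00pci_alloc_reg - map the CSR registers and allocate the EEPROM and
 * RF register caches. All three are released again by rt2x00pci_free_reg(),
 * which is also used for cleanup when any of the allocations fails.
 */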
static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
        struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

        rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
        if (!rt2x00dev->csr.base)
                goto exit;

        rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
        if (!rt2x00dev->eeprom)
                goto exit;

        rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
        if (!rt2x00dev->rf)
                goto exit;

        return 0;

exit:
        ERROR_PROBE("Failed to allocate registers.\n");

        rt2x00pci_free_reg(rt2x00dev);

        return -ENOMEM;
}

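/*
 * rt2x00pci_probe - generic PCI probe handler for rt2x00 drivers.
 * Claims and enables the PCI device, enables bus mastering and a 32-bit
 * DMA mask, allocates the ieee80211_hw structure, fills in the rt2x00
 * device fields and finally hands control to rt2x00lib_probe_dev().
 * The error paths unwind in reverse order of initialization.
 */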
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
        struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        retval = pci_request_regions(pci_dev, pci_name(pci_dev));
        if (retval) {
                ERROR_PROBE("PCI request regions failed.\n");
                return retval;
        }

        retval = pci_enable_device(pci_dev);
        if (retval) {
                ERROR_PROBE("Enable device failed.\n");
                goto exit_release_regions;
        }

        pci_set_master(pci_dev);

        if (pci_set_mwi(pci_dev))
                ERROR_PROBE("MWI not available.\n");

        if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
                ERROR_PROBE("PCI DMA not supported.\n");
                retval = -EIO;
                goto exit_disable_device;
        }

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
                goto exit_disable_device;
        }

        pci_set_drvdata(pci_dev, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = &pci_dev->dev;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;
        rt2x00dev->irq = pci_dev->irq;
        rt2x00dev->name = pci_name(pci_dev);

        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

        retval = rt2x00pci_alloc_reg(rt2x00dev);
        if (retval)
                goto exit_free_device;

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_reg;

        return 0;

exit_free_reg:
        rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_disable_device:
        if (retval != -EBUSY)
                pci_disable_device(pci_dev);

exit_release_regions:
        pci_release_regions(pci_dev);

        pci_set_drvdata(pci_dev, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

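/*
 * rt2x00pci_remove - tear down everything rt2x00pci_probe() set up.
 */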
void rt2x00pci_remove(struct pci_dev *pci_dev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        rt2x00pci_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the PCI device data.
         */
        pci_set_drvdata(pci_dev, NULL);
        pci_disable_device(pci_dev);
        pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
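/*
 * rt2x00pci_suspend - put the device to sleep.
 * Lets rt2x00lib suspend the stack first, then saves the PCI state,
 * disables the device and moves it into the requested power state.
 */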
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        retval = rt2x00lib_suspend(rt2x00dev, state);
        if (retval)
                return retval;

        pci_save_state(pci_dev);
        pci_disable_device(pci_dev);
        return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

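/*
 * rt2x00pci_resume - wake the device up again.
 * Restores the power state, re-enables the device and restores the PCI
 * configuration before handing control back to rt2x00lib_resume().
 */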
int rt2x00pci_resume(struct pci_dev *pci_dev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        if (pci_set_power_state(pci_dev, PCI_D0) ||
            pci_enable_device(pci_dev) ||
            pci_restore_state(pci_dev)) {
                ERROR(rt2x00dev, "Failed to resume device.\n");
                return -EIO;
        }

        return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");