/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */
25
Ivo van Doorn95ea3622007-09-25 17:57:13 -070026#include <linux/dma-mapping.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Ivo van Doorn95ea3622007-09-25 17:57:13 -070031
32#include "rt2x00.h"
33#include "rt2x00pci.h"
34
35/*
Ivo van Doornc9c3b1a2008-11-10 19:41:40 +010036 * Register access.
37 */
/*
 * rt2x00pci_regbusy_read - read an indirect register once its busy
 * field has cleared.
 * @rt2x00dev: device to read from
 * @offset: register offset to poll
 * @field: bitfield within the register that signals "busy"
 * @reg: output; receives the last value read (~0 on timeout)
 *
 * Returns 1 when the register was read with the busy field cleared,
 * 0 when the device is no longer present or the field stayed set for
 * REGISTER_BUSY_COUNT attempts.
 */
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	/* Never touch the hardware after the device has been removed. */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	/* Poll, backing off REGISTER_BUSY_DELAY between each read. */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	ERROR(rt2x00dev, "Indirect register access failed: "
	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
	/* Poison the result so callers cannot mistake it for valid data. */
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
62
Ivo van Doorn95ea3622007-09-25 17:57:13 -070063void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
64{
Ivo van Doorn181d6902008-02-05 16:42:23 -050065 struct data_queue *queue = rt2x00dev->rx;
66 struct queue_entry *entry;
Ivo van Doornb8be63f2008-05-10 13:46:03 +020067 struct queue_entry_priv_pci *entry_priv;
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +020068 struct skb_frame_desc *skbdesc;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070069
70 while (1) {
Ivo van Doorn181d6902008-02-05 16:42:23 -050071 entry = rt2x00queue_get_entry(queue, Q_INDEX);
Ivo van Doornb8be63f2008-05-10 13:46:03 +020072 entry_priv = entry->priv_data;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070073
Ivo van Doorn798b7ad2008-11-08 15:25:33 +010074 if (rt2x00dev->ops->lib->get_entry_state(entry))
Ivo van Doorn95ea3622007-09-25 17:57:13 -070075 break;
76
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +020077 /*
78 * Fill in desc fields of the skb descriptor
79 */
80 skbdesc = get_skb_frame_desc(entry->skb);
81 skbdesc->desc = entry_priv->desc;
82 skbdesc->desc_len = entry->queue->desc_size;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070083
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +020084 /*
Ivo van Doorn64e7d722010-12-13 12:36:00 +010085 * DMA is already done, notify rt2x00lib that
86 * it finished successfully.
87 */
88 rt2x00lib_dmastart(entry);
89 rt2x00lib_dmadone(entry);
90
91 /*
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +020092 * Send the frame to rt2x00lib for further processing.
93 */
Ivo van Doornfa695602010-10-11 15:37:25 +020094 rt2x00lib_rxdone(entry);
Ivo van Doorn95ea3622007-09-25 17:57:13 -070095 }
96}
97EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
98
/*
 * Device initialization handlers.
 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500102static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
103 struct data_queue *queue)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700104{
Ivo van Doornb8be63f2008-05-10 13:46:03 +0200105 struct queue_entry_priv_pci *entry_priv;
Ivo van Doorn30b3a232008-02-17 17:33:24 +0100106 void *addr;
Ivo van Doorn9c9dd2c2008-02-10 22:46:52 +0100107 dma_addr_t dma;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700108 unsigned int i;
109
110 /*
111 * Allocate DMA memory for descriptor and buffer.
112 */
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +0200113 addr = dma_alloc_coherent(rt2x00dev->dev,
114 queue->limit * queue->desc_size,
John W. Linvilleb9237572010-10-25 10:33:07 -0400115 &dma, GFP_KERNEL);
Ivo van Doorn30b3a232008-02-17 17:33:24 +0100116 if (!addr)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700117 return -ENOMEM;
118
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +0200119 memset(addr, 0, queue->limit * queue->desc_size);
Ivo van Doorn9c9dd2c2008-02-10 22:46:52 +0100120
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700121 /*
Ivo van Doorn181d6902008-02-05 16:42:23 -0500122 * Initialize all queue entries to contain valid addresses.
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700123 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500124 for (i = 0; i < queue->limit; i++) {
Ivo van Doornb8be63f2008-05-10 13:46:03 +0200125 entry_priv = queue->entries[i].priv_data;
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +0200126 entry_priv->desc = addr + i * queue->desc_size;
127 entry_priv->desc_dma = dma + i * queue->desc_size;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700128 }
129
130 return 0;
131}
132
Ivo van Doorn181d6902008-02-05 16:42:23 -0500133static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
134 struct data_queue *queue)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700135{
Ivo van Doornb8be63f2008-05-10 13:46:03 +0200136 struct queue_entry_priv_pci *entry_priv =
137 queue->entries[0].priv_data;
Ivo van Doorn181d6902008-02-05 16:42:23 -0500138
Gertjan van Wingerdec4da0042008-06-16 19:56:31 +0200139 if (entry_priv->desc)
140 dma_free_coherent(rt2x00dev->dev,
141 queue->limit * queue->desc_size,
142 entry_priv->desc, entry_priv->desc_dma);
143 entry_priv->desc = NULL;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700144}
145
146int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
147{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500148 struct data_queue *queue;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700149 int status;
150
151 /*
152 * Allocate DMA
153 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500154 queue_for_each(rt2x00dev, queue) {
155 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700156 if (status)
157 goto exit;
158 }
159
160 /*
161 * Register interrupt handler.
162 */
Helmut Schaa78e256c2010-07-11 12:26:48 +0200163 status = request_threaded_irq(rt2x00dev->irq,
164 rt2x00dev->ops->lib->irq_handler,
165 rt2x00dev->ops->lib->irq_handler_thread,
166 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700167 if (status) {
168 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
Ivo van Doorn440ddad2009-03-28 20:51:24 +0100169 rt2x00dev->irq, status);
Ivo van Doornb30cdfc2008-05-05 17:24:03 +0200170 goto exit;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700171 }
172
173 return 0;
174
175exit:
Ivo van Doornb30cdfc2008-05-05 17:24:03 +0200176 queue_for_each(rt2x00dev, queue)
177 rt2x00pci_free_queue_dma(rt2x00dev, queue);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700178
179 return status;
180}
181EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
182
183void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
184{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500185 struct data_queue *queue;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700186
187 /*
188 * Free irq line.
189 */
Helmut Schaa52a9bd22010-05-19 08:47:59 +0200190 free_irq(rt2x00dev->irq, rt2x00dev);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700191
192 /*
193 * Free DMA
194 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500195 queue_for_each(rt2x00dev, queue)
196 rt2x00pci_free_queue_dma(rt2x00dev, queue);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700197}
198EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
199
/*
 * PCI driver handlers.
 */
/*
 * rt2x00pci_free_reg - release the register resources of the device.
 *
 * Frees the RF and EEPROM caches and unmaps the CSR register window.
 * Safe to call on a partially initialized device: kfree(NULL) is a
 * no-op and the CSR base is only unmapped when it was mapped.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}
216
217static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
218{
Gertjan van Wingerde14a3bf82008-06-16 19:55:43 +0200219 struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700220
Arjan van de Ven275f1652008-10-20 21:42:39 -0700221 rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
Ivo van Doorn21795092008-02-10 22:49:13 +0100222 if (!rt2x00dev->csr.base)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700223 goto exit;
224
225 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
226 if (!rt2x00dev->eeprom)
227 goto exit;
228
229 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
230 if (!rt2x00dev->rf)
231 goto exit;
232
233 return 0;
234
235exit:
236 ERROR_PROBE("Failed to allocate registers.\n");
237
238 rt2x00pci_free_reg(rt2x00dev);
239
240 return -ENOMEM;
241}
242
/*
 * rt2x00pci_probe - PCI probe handler shared by all rt2x00 PCI drivers.
 * @pci_dev: PCI device being probed.
 * @id: matched PCI id; its driver_data carries the chipset ops.
 *
 * Enables the device, claims its regions, maps registers and hands
 * the device over to rt2x00lib. Errors unwind in strict reverse order
 * of acquisition via the goto labels below.
 *
 * Returns 0 on success or a negative error code.
 */
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		return retval;
	}

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		goto exit_disable_device;
	}

	pci_set_master(pci_dev);

	/* MWI is only a performance optimization; failure is not fatal. */
	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	/* The hardware only supports 32-bit DMA addressing. */
	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_release_regions;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_release_regions;
	}

	pci_set_drvdata(pci_dev, hw);

	/* Wire the rt2x00 device to its PCI parent and chipset ops. */
	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = pci_name(pci_dev);

	if (pci_dev->is_pcie)
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
	else
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_release_regions:
	pci_release_regions(pci_dev);

exit_disable_device:
	pci_disable_device(pci_dev);

	/* Clearing drvdata is harmless even when it was never set. */
	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);
321
/*
 * rt2x00pci_remove - PCI remove handler shared by all rt2x00 PCI drivers.
 * @pci_dev: PCI device being removed.
 *
 * Tears down in reverse order of rt2x00pci_probe(): first detach the
 * device from rt2x00lib, then release registers and the mac80211
 * structure, finally return the PCI resources.
 */
void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);
342
343#ifdef CONFIG_PM
344int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
345{
346 struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
347 struct rt2x00_dev *rt2x00dev = hw->priv;
348 int retval;
349
350 retval = rt2x00lib_suspend(rt2x00dev, state);
351 if (retval)
352 return retval;
353
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700354 pci_save_state(pci_dev);
355 pci_disable_device(pci_dev);
356 return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
357}
358EXPORT_SYMBOL_GPL(rt2x00pci_suspend);
359
360int rt2x00pci_resume(struct pci_dev *pci_dev)
361{
362 struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
363 struct rt2x00_dev *rt2x00dev = hw->priv;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700364
365 if (pci_set_power_state(pci_dev, PCI_D0) ||
366 pci_enable_device(pci_dev) ||
367 pci_restore_state(pci_dev)) {
368 ERROR(rt2x00dev, "Failed to resume device.\n");
369 return -EIO;
370 }
371
Ivo van Doorn499a2142009-03-28 20:51:58 +0100372 return rt2x00lib_resume(rt2x00dev);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700373}
374EXPORT_SYMBOL_GPL(rt2x00pci_resume);
375#endif /* CONFIG_PM */
376
/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");