/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic PCI device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc;

	/*
	 * This should not happen: we already checked that the entry
	 * was ours. When the hardware disagrees, there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	skbdesc->desc = entry_priv->desc;
	skbdesc->desc_len = entry->queue->desc_size;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci *entry_priv;
	struct skb_frame_desc *skbdesc;

	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in the desc fields of the skb descriptor.
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(rt2x00dev, entry);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
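
/*
 * Usage note: rt2x00pci_rxdone() is not an interrupt handler itself; it is
 * meant to be called from the chipset driver's irq_handler callback (the
 * handler that rt2x00pci_initialize() registers with request_irq() below)
 * once the hardware signals that frames have been received. A minimal,
 * illustrative sketch follows; the register name CSR_RX_DONE and the helper
 * rt2xxx_register_read() are hypothetical placeholders, as each real chipset
 * driver reads its own interrupt status registers here.
 *
 *	static irqreturn_t rt2xxx_interrupt(int irq, void *dev_instance)
 *	{
 *		struct rt2x00_dev *rt2x00dev = dev_instance;
 *		u32 reg;
 *
 *		rt2xxx_register_read(rt2x00dev, CSR_RX_DONE, &reg);
 *		if (!reg)
 *			return IRQ_NONE;
 *
 *		rt2x00pci_rxdone(rt2x00dev);
 *		return IRQ_HANDLED;
 *	}
 */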

/*
 * Device initialization handlers.
 */
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL | GFP_DMA);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
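
/*
 * Note: rt2x00pci_initialize() and rt2x00pci_uninitialize() are not called
 * by the chipset drivers directly. They are plugged into the driver's
 * rt2x00lib_ops callback table, and rt2x00lib invokes them while bringing
 * the device up or down. A minimal sketch follows; the rt2xxx_* names are
 * placeholders, and the real table contains many more callbacks (the exact
 * set and names of fields varies between rt2x00 revisions):
 *
 *	static const struct rt2x00lib_ops rt2xxx_rt2x00_ops = {
 *		.irq_handler	= rt2xxx_interrupt,
 *		.initialize	= rt2x00pci_initialize,
 *		.uninitialize	= rt2x00pci_uninitialize,
 *		.write_tx_data	= rt2x00pci_write_tx_data,
 *		...
 *	};
 */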

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

	rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (dma_set_mask(&pci_dev->dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00pci_free_reg(rt2x00dev);

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */
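
/*
 * The probe/remove/suspend/resume handlers above form the PCI glue that a
 * chipset driver hands to the PCI core through its struct pci_driver. A
 * minimal, illustrative sketch (rt2xxx_driver and rt2xxx_device_table are
 * placeholders, every real chipset driver supplies its own name and PCI
 * device table, and the suspend/resume hooks assume CONFIG_PM):
 *
 *	static struct pci_driver rt2xxx_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= rt2xxx_device_table,
 *		.probe		= rt2x00pci_probe,
 *		.remove		= rt2x00pci_remove,
 *		.suspend	= rt2x00pci_suspend,
 *		.resume		= rt2x00pci_resume,
 *	};
 *
 * The driver_data field of each pci_device_id entry must point to the
 * chipset's rt2x00_ops structure, since rt2x00pci_probe() above casts
 * id->driver_data to locate the chipset specific callbacks. The driver is
 * then registered from module init with pci_register_driver(&rt2xxx_driver).
 */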

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");