/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Register access.
 */
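/*
 * rt2x00pci_regbusy_read - busy-wait on an indirectly accessed register.
 *
 * Polls the register at @offset up to REGISTER_BUSY_COUNT times, waiting
 * REGISTER_BUSY_DELAY microseconds between reads, until @field reads back
 * as zero. Returns 1 when the busy bit cleared (the last register value is
 * left in @reg), and 0 when the device is no longer present or the access
 * timed out (in which case @reg is set to ~0).
 *
 * Illustrative caller sketch (SOME_CSR and SOME_CSR_BUSY are hypothetical
 * names, not defined in this file):
 *
 *	u32 reg;
 *	if (!rt2x00pci_regbusy_read(rt2x00dev, SOME_CSR, SOME_CSR_BUSY, &reg))
 *		return;
 */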
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	ERROR(rt2x00dev, "Indirect register access failed: "
	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

/*
 * TX data handlers.
 */
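/*
 * rt2x00pci_write_tx_data - prepare a TX queue entry for descriptor writing.
 *
 * Verifies that the entry is currently owned by the driver and points the
 * skb frame descriptor at the DMA descriptor belonging to this entry, so
 * the chipset driver can fill in the hardware TX descriptor afterwards.
 * Returns -EINVAL when the hardware still claims ownership of the entry.
 */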
int rt2x00pci_write_tx_data(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc;

	/*
	 * This should not happen: we already checked that this
	 * entry was ours. If the hardware disagrees, the queue
	 * has been corrupted!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	skbdesc->desc = entry_priv->desc;
	skbdesc->desc_len = entry->queue->desc_size;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
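/*
 * rt2x00pci_rxdone - hand completed RX entries over to rt2x00lib.
 *
 * Walks the RX queue starting at Q_INDEX and forwards every entry the
 * hardware has released to rt2x00lib_rxdone(), stopping at the first entry
 * that is still owned by the device. Typically invoked from the chipset
 * driver's interrupt handler.
 */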
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci *entry_priv;
	struct skb_frame_desc *skbdesc;

	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(rt2x00dev, entry);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

/*
 * Device initialization handlers.
 */
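/*
 * Each queue receives one coherent DMA allocation of
 * queue->limit * queue->desc_size bytes; entry i uses the slice starting
 * at i * queue->desc_size, with the matching CPU (desc) and bus (desc_dma)
 * addresses stored in its queue_entry_priv_pci data.
 */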
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL | GFP_DMA);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

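/*
 * rt2x00pci_initialize - allocate queue DMA and claim the interrupt line.
 *
 * On failure, any queue DMA that was already allocated is released again
 * before the error is propagated to the caller.
 */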
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
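/*
 * The probe/remove/suspend/resume handlers below are intended to be wired
 * directly into a chipset driver's struct pci_driver, with each id_table
 * entry carrying a pointer to the chipset's rt2x00_ops in driver_data
 * (rt2x00pci_probe reads it from there). A minimal sketch, assuming a
 * hypothetical "rt2xxxpci" driver (names are illustrative only):
 *
 *	static struct pci_driver rt2xxxpci_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= rt2xxxpci_device_table,
 *		.probe		= rt2x00pci_probe,
 *		.remove		= rt2x00pci_remove,
 *		.suspend	= rt2x00pci_suspend,
 *		.resume		= rt2x00pci_resume,
 *	};
 */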
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

	rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;
	u16 chip;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = pci_name(pci_dev);

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	/*
	 * Determine RT chipset by reading PCI header.
	 */
	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
	rt2x00_set_chip_rt(rt2x00dev, chip);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");