blob: cae446bf212947c715178daff848dd85562d8ebc [file] [log] [blame]
Kalle Valobdcd8172011-07-18 00:22:30 +03001/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
Vasanthakumar Thiagarajan1b2df402012-02-06 20:15:53 +05303 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
Kalle Valobdcd8172011-07-18 00:22:30 +03004 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040018#include <linux/module.h>
Kalle Valobdcd8172011-07-18 00:22:30 +030019#include <linux/mmc/card.h>
20#include <linux/mmc/mmc.h>
21#include <linux/mmc/host.h>
22#include <linux/mmc/sdio_func.h>
23#include <linux/mmc/sdio_ids.h>
24#include <linux/mmc/sdio.h>
25#include <linux/mmc/sd.h>
Kalle Valo2e1cb232011-10-05 12:23:49 +030026#include "hif.h"
Kalle Valobdcd8172011-07-18 00:22:30 +030027#include "hif-ops.h"
28#include "target.h"
29#include "debug.h"
Vivek Natarajan9df337a2011-09-15 20:30:43 +053030#include "cfg80211.h"
Kalle Valobdcd8172011-07-18 00:22:30 +030031
/* Per-device state for the ath6kl SDIO host-interface (HIF) layer. */
struct ath6kl_sdio {
	struct sdio_func *func;

	/* protects bus_req_freeq */
	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;

	/* bounce buffer used when a caller's buffer is not DMA-able */
	u8 *dma_buffer;

	/* protects access to dma_buffer */
	struct mutex dma_buffer_mutex;

	/* scatter request list head */
	struct list_head scat_req;

	/* Avoids disabling irq while the interrupts being handled */
	struct mutex mtx_irq;

	/* protects scat_req list */
	spinlock_t scat_lock;
	bool scatter_enabled;

	/* true while the sdio function is disabled (powered off) */
	bool is_disabled;
	const struct sdio_device_id *id;
	/* worker that drains wr_asyncq via ath6kl_sdio_write_async_work() */
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	/* protects wr_asyncq */
	spinlock_t wr_async_lock;
};
65
/* Field values for the CMD53 (IO_RW_EXTENDED) argument word */
#define CMD53_ARG_READ 0
#define CMD53_ARG_WRITE 1
#define CMD53_ARG_BLOCK_BASIS 1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS 1
71
/* Return the SDIO HIF private data hung off the core ath6kl device. */
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
76
77/*
78 * Macro to check if DMA buffer is WORD-aligned and DMA-able.
79 * Most host controllers assume the buffer is DMA'able and will
80 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
81 * check fails on stack memory.
82 */
/*
 * Macro to check if DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
 * check fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	/* bounce if not 4-byte aligned or not in the kernel direct map */
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
87
/* Publish the fixed mailbox layout (addresses/sizes) for the HTC layer. */
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}
100
/*
 * Pack the 32-bit CMD53 (IO_RW_EXTENDED) argument word:
 * bit 31 = R/W, bits 30-28 = function, bit 27 = block mode,
 * bit 26 = opcode (incrementing address), bits 25-9 = address,
 * bits 8-0 = block/byte count.
 */
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	/*
	 * Widen to u32 before shifting: (rw & 1) promotes to int, and
	 * left-shifting a 1 into bit 31 of an int is undefined behavior.
	 */
	*arg = (((u32)(rw & 1) << 31) |
		((u32)(func & 0x7) << 28) |
		((u32)(mode & 1) << 27) |
		((u32)(opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}
112
/*
 * Pack the 32-bit CMD52 (IO_RW_DIRECT) argument word for a function-0
 * register access: bit 31 = R/W, bits 30-28 = function (always 0 here),
 * bit 27 = RAW (read-after-write), bits 25-9 = register address,
 * bits 7-0 = data byte.  Bits 26 and 8 are set as in the original code.
 */
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	/*
	 * Widen to u32 before shifting: setting bit 31 via a promoted
	 * int left-shift is undefined behavior in C.
	 */
	*arg = ((u32)(write & 1) << 31) |
	       ((u32)(func & 0x7) << 28) |
	       ((u32)(raw & 1) << 27) |
	       (1u << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1u << 8) |
	       (val & 0xFF);
}
127
/*
 * Write one byte to a function-0 (card-common) register with a raw
 * CMD52, bypassing the sdio_func helpers.  Returns 0 or an mmc error.
 */
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	/* write=1, raw=0: plain register write, no read-back */
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}
141
/*
 * Synchronous SDIO transfer.  request flags choose direction
 * (HIF_WRITE vs read) and fixed vs incrementing addressing.  For
 * writes into the mailbox ranges the address is shifted so the last
 * byte of the transfer lands on the mailbox end-of-message address.
 */
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* FIXME: looks like ugly workaround for something */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: this also looks like ugly workaround */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	return ret;
}
179
Kalle Valobdcd8172011-07-18 00:22:30 +0300180static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
181{
182 struct bus_request *bus_req;
Kalle Valobdcd8172011-07-18 00:22:30 +0300183
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530184 spin_lock_bh(&ar_sdio->lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300185
186 if (list_empty(&ar_sdio->bus_req_freeq)) {
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530187 spin_unlock_bh(&ar_sdio->lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300188 return NULL;
189 }
190
191 bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
192 struct bus_request, list);
193 list_del(&bus_req->list);
194
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530195 spin_unlock_bh(&ar_sdio->lock);
Kalle Valof7325b82011-09-27 14:30:58 +0300196 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
197 __func__, bus_req);
Kalle Valobdcd8172011-07-18 00:22:30 +0300198
199 return bus_req;
200}
201
202static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
203 struct bus_request *bus_req)
204{
Kalle Valof7325b82011-09-27 14:30:58 +0300205 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
206 __func__, bus_req);
Kalle Valobdcd8172011-07-18 00:22:30 +0300207
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530208 spin_lock_bh(&ar_sdio->lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300209 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530210 spin_unlock_bh(&ar_sdio->lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300211}
212
/*
 * Translate a HIF scatter request into an mmc_data descriptor:
 * block geometry, direction flag, and the scatterlist entries.
 * Assumes scat_req->len is a multiple of HIF_MBOX_BLOCK_SIZE.
 */
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}
249
/*
 * Execute one scatter request synchronously.  Virtual-scatter requests
 * (data already linearized into virt_dma_buf) go through the plain I/O
 * path; real scatter requests are issued as a raw CMD53 with an SG
 * list.  On completion the status is stored in the request and, for
 * HIF_ASYNCHRONOUS requests, the completion callback is invoked.
 */
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	/* command error takes precedence over data error */
	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	/* NOTE(review): message says "write" but this also fires for
	 * failed scatter reads.
	 */
	if (scat_req->status)
		ath6kl_err("Scatter write request failed:%d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}
326
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530327static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
328 int n_scat_entry, int n_scat_req,
329 bool virt_scat)
330{
331 struct hif_scatter_req *s_req;
332 struct bus_request *bus_req;
Vasanthakumar Thiagarajancfeab102011-07-16 20:29:14 +0530333 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
334 u8 *virt_buf;
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530335
336 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
337 scat_req_sz = sizeof(*s_req) + scat_list_sz;
338
339 if (!virt_scat)
340 sg_sz = sizeof(struct scatterlist) * n_scat_entry;
Vasanthakumar Thiagarajancfeab102011-07-16 20:29:14 +0530341 else
342 buf_sz = 2 * L1_CACHE_BYTES +
343 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530344
345 for (i = 0; i < n_scat_req; i++) {
346 /* allocate the scatter request */
347 s_req = kzalloc(scat_req_sz, GFP_KERNEL);
348 if (!s_req)
349 return -ENOMEM;
350
Vasanthakumar Thiagarajancfeab102011-07-16 20:29:14 +0530351 if (virt_scat) {
352 virt_buf = kzalloc(buf_sz, GFP_KERNEL);
353 if (!virt_buf) {
354 kfree(s_req);
355 return -ENOMEM;
356 }
357
358 s_req->virt_dma_buf =
359 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
360 } else {
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530361 /* allocate sglist */
362 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
363
364 if (!s_req->sgentries) {
365 kfree(s_req);
366 return -ENOMEM;
367 }
368 }
369
370 /* allocate a bus request for this scatter request */
371 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
372 if (!bus_req) {
373 kfree(s_req->sgentries);
Vasanthakumar Thiagarajancfeab102011-07-16 20:29:14 +0530374 kfree(s_req->virt_dma_buf);
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530375 kfree(s_req);
376 return -ENOMEM;
377 }
378
379 /* assign the scatter request to this bus request */
380 bus_req->scat_req = s_req;
381 s_req->busrequest = bus_req;
382
Vasanthakumar Thiagarajan4a005c32011-07-16 20:29:15 +0530383 s_req->virt_scat = virt_scat;
384
Vasanthakumar Thiagarajan3df505a2011-07-16 20:29:10 +0530385 /* add it to the scatter pool */
386 hif_scatter_req_add(ar_sdio->ar, s_req);
387 }
388
389 return 0;
390}
391
/*
 * Synchronous read/write with automatic bouncing: if the caller's
 * buffer is unaligned or not DMA-able, the transfer is staged through
 * the shared dma_buffer (serialized by dma_buffer_mutex).
 */
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		/* held across the transfer; released below iff bounced */
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else
		tbuf = buf;

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	/* copy read data back out of the bounce buffer */
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);

	return ret;
}
422
/*
 * Process one queued async request: scatter requests complete through
 * ath6kl_sdio_scat_rw() (which invokes their own completion); plain
 * requests run synchronously, are returned to the free pool, and then
 * the HIF completion handler is called with the saved packet context.
 */
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req)
		ath6kl_sdio_scat_rw(ar_sdio, req);
	else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		/* save context before the request is recycled */
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kl_hif_rw_comp_handler(context, status);
	}
}
440
/*
 * Workqueue handler draining wr_asyncq.  The lock is dropped around
 * each request since __ath6kl_sdio_write_async() may sleep (SDIO I/O);
 * list_for_each_entry_safe tolerates the removal of the current entry.
 */
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}
457
/*
 * SDIO interrupt callback (entered with the host claimed by the MMC
 * core).  mtx_irq keeps ath6kl_sdio_irq_disable() from releasing the
 * irq while the bottom half is still running.
 */
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	mutex_lock(&ar_sdio->mtx_irq);
	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	/* re-claim before returning to the MMC core */
	sdio_claim_host(ar_sdio->func);
	mutex_unlock(&ar_sdio->mtx_irq);
	WARN_ON(status && status != -ECANCELED);
}
478
/*
 * Enable the SDIO function and wait for the hardware to come up.
 * No-op if the function is already enabled.
 */
static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d)\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}
511
/*
 * Disable the SDIO function.  No-op if already disabled; is_disabled
 * is only set once the disable actually succeeded.
 */
static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}
534
535static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
536 u32 length, u32 request,
537 struct htc_packet *packet)
538{
539 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
540 struct bus_request *bus_req;
Kalle Valobdcd8172011-07-18 00:22:30 +0300541
542 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
543
544 if (!bus_req)
545 return -ENOMEM;
546
547 bus_req->address = address;
548 bus_req->buffer = buffer;
549 bus_req->length = length;
550 bus_req->request = request;
551 bus_req->packet = packet;
552
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530553 spin_lock_bh(&ar_sdio->wr_async_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300554 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530555 spin_unlock_bh(&ar_sdio->wr_async_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300556 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
557
558 return 0;
559}
560
561static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
562{
563 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
564 int ret;
565
566 sdio_claim_host(ar_sdio->func);
567
568 /* Register the isr */
569 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
570 if (ret)
571 ath6kl_err("Failed to claim sdio irq: %d\n", ret);
572
573 sdio_release_host(ar_sdio->func);
574}
575
/*
 * Release the SDIO irq.  mtx_irq is taken so we never tear the irq
 * down while ath6kl_sdio_irq_handler() is mid-flight.
 */
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	mutex_lock(&ar_sdio->mtx_irq);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	mutex_unlock(&ar_sdio->mtx_irq);

	sdio_release_host(ar_sdio->func);
}
593
/*
 * Pop a scatter request off the free pool, recording the remaining
 * pool depth in the request.  Returns NULL when the pool is empty.
 */
static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;

	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);

		/* depth after removal of this node */
		node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);

	return node;
}
613
614static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
615 struct hif_scatter_req *s_req)
616{
617 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
Kalle Valobdcd8172011-07-18 00:22:30 +0300618
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530619 spin_lock_bh(&ar_sdio->scat_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300620
621 list_add_tail(&s_req->list, &ar_sdio->scat_req);
622
Vasanthakumar Thiagarajan151bd302011-09-30 19:18:43 +0530623 spin_unlock_bh(&ar_sdio->scat_lock);
Kalle Valobdcd8172011-07-18 00:22:30 +0300624
625}
626
/* scatter gather read write request */
/*
 * Dispatch a scatter request: synchronous requests run inline via
 * ath6kl_sdio_scat_rw(); asynchronous ones are queued on wr_asyncq for
 * the async worker.  Rejects zero-length requests with -EINVAL.  For
 * async requests the returned 0 only means "queued".
 */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS)
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	else {
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}
653
/* clean up scatter support */
/*
 * Free every scatter request in the pool.  The lock is dropped around
 * the frees (kfree/debug paths may not be called under a BH spinlock
 * safely here) and reacquired before the next list access.
 */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call completion handler with
		 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
		 * that the packet is properly freed?
		 */
		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		/* NOTE(review): virt_dma_buf is the L1_CACHE_ALIGN'ed
		 * pointer, not necessarily the kzalloc origin — kfree here
		 * relies on kmalloc returning cache-aligned memory; verify.
		 */
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);
}
681
/* setup of HIF scatter resources */
/*
 * Try real SG scatter first (if the host controller supports enough
 * segments); on controller limitation or allocation failure fall back
 * to virtual scatter (linearized DMA buffer).  Idempotent via
 * scatter_enabled.  Publishes the chosen limits into the HTC target.
 */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
				MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	/* safe: when virt_scat is true, short-circuit skips the
	 * (otherwise uninitialized) ret
	 */
	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources !\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
			ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}
745
/*
 * One-time function configuration: 4-bit async irq mode on AR6003+,
 * enable timeout, and the mailbox block size.  Returns 0 or the first
 * failing step's error.
 */
static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d)\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}
784
/*
 * For wow/sched-scan suspend: require the host controller to keep the
 * card powered and to wake on SDIO irq, then set both pm flags.
 * Returns -EINVAL if the host lacks either capability.
 */
static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

	if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
	    !(flags & MMC_PM_KEEP_POWER))
		return -EINVAL;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
		return ret;
	}

	/* sdio irq wakes up host */
	ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
	if (ret)
		ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

	return ret;
}
813
/*
 * Suspend entry point called via ath6kl_hif_ops.  Picks a suspend mode
 * (sched-scan, WoW, deep sleep) based on driver state and configuration;
 * any failure along the way falls back to cutting power (cut_pwr).
 */
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	/* A scheduled scan in progress needs power and wake-irq kept on. */
	if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");

		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar,
					      ATH6KL_CFG_SUSPEND_SCHED_SCAN,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

	/* WoW: explicitly configured, or implied by a wow pattern when no
	 * suspend mode was configured at all. */
	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {

		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret)
			goto cut_pwr;

		return 0;
	}

	/* Deep sleep is the default when nothing else was requested; it only
	 * needs keep-power, not the wake irq. */
	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode) {

		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * Workaround to support Deep Sleep with MSM, set the host pm
		 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC driver to disable
		 * the sdc2_clock and internally allows MSM to enter
		 * TCXO shutdown properly.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						     MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	/* last resort: power off the chip entirely */
	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
886
Chilam Ngaa6cffc2011-10-05 10:12:52 +0300887static int ath6kl_sdio_resume(struct ath6kl *ar)
888{
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200889 switch (ar->state) {
890 case ATH6KL_STATE_OFF:
891 case ATH6KL_STATE_CUTPOWER:
892 ath6kl_dbg(ATH6KL_DBG_SUSPEND,
893 "sdio resume configuring sdio\n");
894
895 /* need to set sdio settings after power is cut from sdio */
896 ath6kl_sdio_config(ar);
897 break;
898
899 case ATH6KL_STATE_ON:
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200900 break;
901
902 case ATH6KL_STATE_DEEPSLEEP:
903 break;
Raja Manid7c44e02011-11-07 22:52:46 +0200904
905 case ATH6KL_STATE_WOW:
906 break;
Kalle Valo10509f92011-12-13 14:52:07 +0200907 case ATH6KL_STATE_SCHED_SCAN:
908 break;
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200909 }
910
Kalle Valo52d81a62011-11-01 08:44:21 +0200911 ath6kl_cfg80211_resume(ar);
Chilam Ngaa6cffc2011-10-05 10:12:52 +0300912
913 return 0;
914}
915
Kalle Valoc7111492011-11-11 12:17:51 +0200916/* set the window address register (using 4-byte register access ). */
917static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
918{
919 int status;
920 u8 addr_val[4];
921 s32 i;
922
923 /*
924 * Write bytes 1,2,3 of the register to set the upper address bytes,
925 * the LSB is written last to initiate the access cycle
926 */
927
928 for (i = 1; i <= 3; i++) {
929 /*
930 * Fill the buffer with the address byte value we want to
931 * hit 4 times.
932 */
933 memset(addr_val, ((u8 *)&addr)[i], 4);
934
935 /*
936 * Hit each byte of the register address with a 4-byte
937 * write operation to the same address, this is a harmless
938 * operation.
939 */
940 status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
941 4, HIF_WR_SYNC_BYTE_FIX);
942 if (status)
943 break;
944 }
945
946 if (status) {
947 ath6kl_err("%s: failed to write initial bytes of 0x%x "
948 "to window reg: 0x%X\n", __func__,
949 addr, reg_addr);
950 return status;
951 }
952
953 /*
954 * Write the address register again, this time write the whole
955 * 4-byte value. The effect here is that the LSB write causes the
956 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
957 * effect since we are writing the same values again
958 */
959 status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
960 4, HIF_WR_SYNC_BYTE_INC);
961
962 if (status) {
963 ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
964 __func__, addr, reg_addr);
965 return status;
966 }
967
968 return 0;
969}
970
971static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
972{
973 int status;
974
975 /* set window register to start read cycle */
976 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
977 address);
978
979 if (status)
980 return status;
981
982 /* read the data */
983 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
984 (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
985 if (status) {
986 ath6kl_err("%s: failed to read from window data addr\n",
987 __func__);
988 return status;
989 }
990
991 return status;
992}
993
994static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
995 __le32 data)
996{
997 int status;
998 u32 val = (__force u32) data;
999
1000 /* set write data */
1001 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
1002 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
1003 if (status) {
1004 ath6kl_err("%s: failed to write 0x%x to window data addr\n",
1005 __func__, data);
1006 return status;
1007 }
1008
1009 /* set window register, which starts the write cycle */
1010 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
1011 address);
1012}
1013
Kalle Valo66b693c2011-11-11 12:17:33 +02001014static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
1015{
1016 u32 addr;
1017 unsigned long timeout;
1018 int ret;
1019
1020 ar->bmi.cmd_credits = 0;
1021
1022 /* Read the counter register to get the command credits */
1023 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
1024
1025 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
1026 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
1027
1028 /*
1029 * Hit the credit counter with a 4-byte access, the first byte
1030 * read will hit the counter and cause a decrement, while the
1031 * remaining 3 bytes has no effect. The rationale behind this
1032 * is to make all HIF accesses 4-byte aligned.
1033 */
1034 ret = ath6kl_sdio_read_write_sync(ar, addr,
1035 (u8 *)&ar->bmi.cmd_credits, 4,
1036 HIF_RD_SYNC_BYTE_INC);
1037 if (ret) {
1038 ath6kl_err("Unable to decrement the command credit "
1039 "count register: %d\n", ret);
1040 return ret;
1041 }
1042
1043 /* The counter is only 8 bits.
1044 * Ignore anything in the upper 3 bytes
1045 */
1046 ar->bmi.cmd_credits &= 0xFF;
1047 }
1048
1049 if (!ar->bmi.cmd_credits) {
1050 ath6kl_err("bmi communication timeout\n");
1051 return -ETIMEDOUT;
1052 }
1053
1054 return 0;
1055}
1056
1057static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
1058{
1059 unsigned long timeout;
1060 u32 rx_word = 0;
1061 int ret = 0;
1062
1063 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
1064 while ((time_before(jiffies, timeout)) && !rx_word) {
1065 ret = ath6kl_sdio_read_write_sync(ar,
1066 RX_LOOKAHEAD_VALID_ADDRESS,
1067 (u8 *)&rx_word, sizeof(rx_word),
1068 HIF_RD_SYNC_BYTE_INC);
1069 if (ret) {
1070 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
1071 return ret;
1072 }
1073
1074 /* all we really want is one bit */
1075 rx_word &= (1 << ENDPOINT1);
1076 }
1077
1078 if (!rx_word) {
1079 ath6kl_err("bmi_recv_buf FIFO empty\n");
1080 return -EINVAL;
1081 }
1082
1083 return ret;
1084}
1085
1086static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1087{
1088 int ret;
1089 u32 addr;
1090
1091 ret = ath6kl_sdio_bmi_credits(ar);
1092 if (ret)
1093 return ret;
1094
1095 addr = ar->mbox_info.htc_addr;
1096
1097 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1098 HIF_WR_SYNC_BYTE_INC);
1099 if (ret)
1100 ath6kl_err("unable to send the bmi data to the device\n");
1101
1102 return ret;
1103}
1104
1105static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
1106{
1107 int ret;
1108 u32 addr;
1109
1110 /*
1111 * During normal bootup, small reads may be required.
1112 * Rather than issue an HIF Read and then wait as the Target
1113 * adds successive bytes to the FIFO, we wait here until
1114 * we know that response data is available.
1115 *
1116 * This allows us to cleanly timeout on an unexpected
1117 * Target failure rather than risk problems at the HIF level.
1118 * In particular, this avoids SDIO timeouts and possibly garbage
1119 * data on some host controllers. And on an interconnect
1120 * such as Compact Flash (as well as some SDIO masters) which
1121 * does not provide any indication on data timeout, it avoids
1122 * a potential hang or garbage response.
1123 *
1124 * Synchronization is more difficult for reads larger than the
1125 * size of the MBOX FIFO (128B), because the Target is unable
1126 * to push the 129th byte of data until AFTER the Host posts an
1127 * HIF Read and removes some FIFO data. So for large reads the
1128 * Host proceeds to post an HIF Read BEFORE all the data is
1129 * actually available to read. Fortunately, large BMI reads do
1130 * not occur in practice -- they're supported for debug/development.
1131 *
1132 * So Host/Target BMI synchronization is divided into these cases:
1133 * CASE 1: length < 4
1134 * Should not happen
1135 *
1136 * CASE 2: 4 <= length <= 128
1137 * Wait for first 4 bytes to be in FIFO
1138 * If CONSERVATIVE_BMI_READ is enabled, also wait for
1139 * a BMI command credit, which indicates that the ENTIRE
1140 * response is available in the the FIFO
1141 *
1142 * CASE 3: length > 128
1143 * Wait for the first 4 bytes to be in FIFO
1144 *
1145 * For most uses, a small timeout should be sufficient and we will
1146 * usually see a response quickly; but there may be some unusual
1147 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
1148 * For now, we use an unbounded busy loop while waiting for
1149 * BMI_EXECUTE.
1150 *
1151 * If BMI_EXECUTE ever needs to support longer-latency execution,
1152 * especially in production, this code needs to be enhanced to sleep
1153 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
1154 * a function of Host processor speed.
1155 */
1156 if (len >= 4) { /* NB: Currently, always true */
1157 ret = ath6kl_bmi_get_rx_lkahd(ar);
1158 if (ret)
1159 return ret;
1160 }
1161
1162 addr = ar->mbox_info.htc_addr;
1163 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1164 HIF_RD_SYNC_BYTE_INC);
1165 if (ret) {
1166 ath6kl_err("Unable to read the bmi data from the device: %d\n",
1167 ret);
1168 return ret;
1169 }
1170
1171 return 0;
1172}
1173
Kalle Valo32a07e42011-10-30 21:15:57 +02001174static void ath6kl_sdio_stop(struct ath6kl *ar)
1175{
1176 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
1177 struct bus_request *req, *tmp_req;
1178 void *context;
1179
1180 /* FIXME: make sure that wq is not queued again */
1181
1182 cancel_work_sync(&ar_sdio->wr_async_work);
1183
1184 spin_lock_bh(&ar_sdio->wr_async_lock);
1185
1186 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1187 list_del(&req->list);
1188
1189 if (req->scat_req) {
1190 /* this is a scatter gather request */
1191 req->scat_req->status = -ECANCELED;
1192 req->scat_req->complete(ar_sdio->ar->htc_target,
1193 req->scat_req);
1194 } else {
1195 context = req->packet;
1196 ath6kl_sdio_free_bus_req(ar_sdio, req);
1197 ath6kl_hif_rw_comp_handler(context, -ECANCELED);
1198 }
1199 }
1200
1201 spin_unlock_bh(&ar_sdio->wr_async_lock);
1202
1203 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
1204}
1205
Kalle Valobdcd8172011-07-18 00:22:30 +03001206static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
1207 .read_write_sync = ath6kl_sdio_read_write_sync,
1208 .write_async = ath6kl_sdio_write_async,
1209 .irq_enable = ath6kl_sdio_irq_enable,
1210 .irq_disable = ath6kl_sdio_irq_disable,
1211 .scatter_req_get = ath6kl_sdio_scatter_req_get,
1212 .scatter_req_add = ath6kl_sdio_scatter_req_add,
1213 .enable_scatter = ath6kl_sdio_enable_scatter,
Vasanthakumar Thiagarajanf74a7362011-07-16 20:29:05 +05301214 .scat_req_rw = ath6kl_sdio_async_rw_scatter,
Kalle Valobdcd8172011-07-18 00:22:30 +03001215 .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
Kalle Valoabcb3442011-07-22 08:26:20 +03001216 .suspend = ath6kl_sdio_suspend,
Chilam Ngaa6cffc2011-10-05 10:12:52 +03001217 .resume = ath6kl_sdio_resume,
Kalle Valoc7111492011-11-11 12:17:51 +02001218 .diag_read32 = ath6kl_sdio_diag_read32,
1219 .diag_write32 = ath6kl_sdio_diag_write32,
Kalle Valo66b693c2011-11-11 12:17:33 +02001220 .bmi_read = ath6kl_sdio_bmi_read,
1221 .bmi_write = ath6kl_sdio_bmi_write,
Kalle Valob2e75692011-10-27 18:48:14 +03001222 .power_on = ath6kl_sdio_power_on,
1223 .power_off = ath6kl_sdio_power_off,
Kalle Valo32a07e42011-10-30 21:15:57 +02001224 .stop = ath6kl_sdio_stop,
Kalle Valobdcd8172011-07-18 00:22:30 +03001225};
1226
Kalle Valob4b2a0b2011-11-01 08:44:44 +02001227#ifdef CONFIG_PM_SLEEP
1228
1229/*
1230 * Empty handlers so that mmc subsystem doesn't remove us entirely during
1231 * suspend. We instead follow cfg80211 suspend/resume handlers.
1232 */
1233static int ath6kl_sdio_pm_suspend(struct device *device)
1234{
1235 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
1236
1237 return 0;
1238}
1239
1240static int ath6kl_sdio_pm_resume(struct device *device)
1241{
1242 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
1243
1244 return 0;
1245}
1246
1247static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
1248 ath6kl_sdio_pm_resume);
1249
1250#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
1251
1252#else
1253
1254#define ATH6KL_SDIO_PM_OPS NULL
1255
1256#endif /* CONFIG_PM_SLEEP */
1257
Kalle Valobdcd8172011-07-18 00:22:30 +03001258static int ath6kl_sdio_probe(struct sdio_func *func,
1259 const struct sdio_device_id *id)
1260{
1261 int ret;
1262 struct ath6kl_sdio *ar_sdio;
1263 struct ath6kl *ar;
1264 int count;
1265
Kalle Valo3ef987b2011-10-24 12:18:07 +03001266 ath6kl_dbg(ATH6KL_DBG_BOOT,
1267 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
Kalle Valof7325b82011-09-27 14:30:58 +03001268 func->num, func->vendor, func->device,
1269 func->max_blksize, func->cur_blksize);
Kalle Valobdcd8172011-07-18 00:22:30 +03001270
1271 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
1272 if (!ar_sdio)
1273 return -ENOMEM;
1274
1275 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
1276 if (!ar_sdio->dma_buffer) {
1277 ret = -ENOMEM;
1278 goto err_hif;
1279 }
1280
1281 ar_sdio->func = func;
1282 sdio_set_drvdata(func, ar_sdio);
1283
1284 ar_sdio->id = id;
1285 ar_sdio->is_disabled = true;
1286
1287 spin_lock_init(&ar_sdio->lock);
1288 spin_lock_init(&ar_sdio->scat_lock);
1289 spin_lock_init(&ar_sdio->wr_async_lock);
Raja Manifdb28582011-11-21 12:26:51 +05301290 mutex_init(&ar_sdio->dma_buffer_mutex);
Vasanthakumar Thiagarajan9d826822012-01-04 15:57:19 +05301291 mutex_init(&ar_sdio->mtx_irq);
Kalle Valobdcd8172011-07-18 00:22:30 +03001292
1293 INIT_LIST_HEAD(&ar_sdio->scat_req);
1294 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
1295 INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
1296
1297 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
1298
1299 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
1300 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
1301
Kalle Valo45eaa782012-01-17 20:09:05 +02001302 ar = ath6kl_core_create(&ar_sdio->func->dev);
Kalle Valobdcd8172011-07-18 00:22:30 +03001303 if (!ar) {
1304 ath6kl_err("Failed to alloc ath6kl core\n");
1305 ret = -ENOMEM;
1306 goto err_dma;
1307 }
1308
1309 ar_sdio->ar = ar;
Kalle Valo77eab1e2011-11-11 12:18:22 +02001310 ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
Kalle Valobdcd8172011-07-18 00:22:30 +03001311 ar->hif_priv = ar_sdio;
1312 ar->hif_ops = &ath6kl_sdio_ops;
Kalle Valo1f4c8942011-11-11 12:17:42 +02001313 ar->bmi.max_data_size = 256;
Kalle Valobdcd8172011-07-18 00:22:30 +03001314
1315 ath6kl_sdio_set_mbox_info(ar);
1316
Kalle Valoe28e8102011-11-01 08:44:36 +02001317 ret = ath6kl_sdio_config(ar);
Kalle Valobdcd8172011-07-18 00:22:30 +03001318 if (ret) {
Kalle Valoe28e8102011-11-01 08:44:36 +02001319 ath6kl_err("Failed to config sdio: %d\n", ret);
1320 goto err_core_alloc;
Kalle Valobdcd8172011-07-18 00:22:30 +03001321 }
1322
Kalle Valobdcd8172011-07-18 00:22:30 +03001323 ret = ath6kl_core_init(ar);
1324 if (ret) {
1325 ath6kl_err("Failed to init ath6kl core\n");
Kalle Valoe28e8102011-11-01 08:44:36 +02001326 goto err_core_alloc;
Kalle Valobdcd8172011-07-18 00:22:30 +03001327 }
1328
1329 return ret;
1330
Vasanthakumar Thiagarajan8dafb702011-10-25 19:33:58 +05301331err_core_alloc:
Kalle Valo45eaa782012-01-17 20:09:05 +02001332 ath6kl_core_destroy(ar_sdio->ar);
Kalle Valobdcd8172011-07-18 00:22:30 +03001333err_dma:
1334 kfree(ar_sdio->dma_buffer);
1335err_hif:
1336 kfree(ar_sdio);
1337
1338 return ret;
1339}
1340
1341static void ath6kl_sdio_remove(struct sdio_func *func)
1342{
1343 struct ath6kl_sdio *ar_sdio;
1344
Kalle Valo3ef987b2011-10-24 12:18:07 +03001345 ath6kl_dbg(ATH6KL_DBG_BOOT,
1346 "sdio removed func %d vendor 0x%x device 0x%x\n",
Kalle Valof7325b82011-09-27 14:30:58 +03001347 func->num, func->vendor, func->device);
1348
Kalle Valobdcd8172011-07-18 00:22:30 +03001349 ar_sdio = sdio_get_drvdata(func);
1350
1351 ath6kl_stop_txrx(ar_sdio->ar);
1352 cancel_work_sync(&ar_sdio->wr_async_work);
1353
Vasanthakumar Thiagarajan6db8fa52011-10-25 19:34:16 +05301354 ath6kl_core_cleanup(ar_sdio->ar);
Vasanthakumar Thiagarajan0e7de662012-01-21 15:22:49 +05301355 ath6kl_core_destroy(ar_sdio->ar);
Kalle Valobdcd8172011-07-18 00:22:30 +03001356
Kalle Valobdcd8172011-07-18 00:22:30 +03001357 kfree(ar_sdio->dma_buffer);
1358 kfree(ar_sdio);
1359}
1360
/* SDIO function IDs claimed by this driver: AR6003 and AR6004 variants. */
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
1370
/* SDIO bus driver registration data; PM ops are stubs (see above). */
static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};
1378
1379static int __init ath6kl_sdio_init(void)
1380{
1381 int ret;
1382
1383 ret = sdio_register_driver(&ath6kl_sdio_driver);
1384 if (ret)
1385 ath6kl_err("sdio driver registration failed: %d\n", ret);
1386
1387 return ret;
1388}
1389
/* Module unload: unregister from the SDIO bus (invokes ->remove for
 * any bound functions). */
static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
1394
module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

/* Firmware images referenced by this driver (for modinfo / initramfs tools). */

/* AR6003 hw 2.0 */
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
/* AR6003 hw 2.1.1 */
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.0 */
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.1 */
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);