/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"

struct ath6kl_sdio {
        struct sdio_func *func;

        /* protects access to bus_req_freeq */
        spinlock_t lock;

        /* free list */
        struct list_head bus_req_freeq;

        /* available bus requests */
        struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

        struct ath6kl *ar;

        u8 *dma_buffer;

        /* protects access to dma_buffer */
        struct mutex dma_buffer_mutex;

        /* scatter request list head */
        struct list_head scat_req;

        atomic_t irq_handling;
        wait_queue_head_t irq_wq;

        /* protects access to scat_req */
        spinlock_t scat_lock;

        bool scatter_enabled;

        bool is_disabled;
        const struct sdio_device_id *id;
        struct work_struct wr_async_work;
        struct list_head wr_asyncq;

        /* protects access to wr_asyncq */
        spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
        return ar->hif_priv;
}

/*
 * Check whether a DMA buffer is word-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA'able and will bug-check otherwise
 * (e.g. buffers on the stack). The virt_addr_valid() check fails on stack
 * memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
        return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
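
/*
 * Example: a read into a buffer at an address ending in 0x2 is bounced
 * (bit 1 breaks word alignment), as is any buffer that fails
 * virt_addr_valid(); such transfers are staged through the kmalloc'ed
 * ar_sdio->dma_buffer in ath6kl_sdio_read_write_sync() below.
 */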

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
        struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

        /* EP1 has an extended range */
        mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
        mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
        mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
        mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
        mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
        mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
                                             u8 mode, u8 opcode, u32 addr,
                                             u16 blksz)
{
        *arg = (((rw & 1) << 31) |
                ((func & 0x7) << 28) |
                ((mode & 1) << 27) |
                ((opcode & 1) << 26) |
                ((addr & 0x1FFFF) << 9) |
                (blksz & 0x1FF));
}
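
/*
 * Worked example (illustrative values, not from a real trace): a
 * fixed-address block write of 16 blocks to function 1 at mailbox
 * address 0x800 encodes as
 *
 *   (1 << 31) | (1 << 28) | (1 << 27) | (0 << 26) | (0x800 << 9) | 16
 *     = 0x98100010
 *
 * i.e. R/W flag, function number, block mode, opcode (fixed vs.
 * incrementing address), 17-bit register address and 9-bit block count,
 * matching the SD_IO_RW_EXTENDED (CMD53) argument layout.
 */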

static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
                                             unsigned int address,
                                             unsigned char val)
{
        const u8 func = 0;

        *arg = ((write & 1) << 31) |
               ((func & 0x7) << 28) |
               ((raw & 1) << 27) |
               (1 << 26) |
               ((address & 0x1FFFF) << 9) |
               (1 << 8) |
               (val & 0xFF);
}
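
/*
 * This helper always targets function 0, i.e. the card's common register
 * space. ath6kl_sdio_func0_cmd52_wr_byte() below wraps it in a raw
 * mmc_wait_for_cmd() so that CCCR registers can be poked directly;
 * ath6kl_sdio_config() uses exactly this path to write
 * SDIO_IRQ_MODE_ASYNC_4BIT_IRQ into CCCR_SDIO_IRQ_MODE_REG.
 */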

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char byte)
{
        struct mmc_command io_cmd;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
                          u8 *buf, u32 len)
{
        int ret = 0;

        sdio_claim_host(func);

        if (request & HIF_WRITE) {
                /* FIXME: looks like ugly workaround for something */
                if (addr >= HIF_MBOX_BASE_ADDR &&
                    addr <= HIF_MBOX_END_ADDR)
                        addr += (HIF_MBOX_WIDTH - len);

                /* FIXME: this also looks like ugly workaround */
                if (addr == HIF_MBOX0_EXT_BASE_ADDR)
                        addr += HIF_MBOX0_EXT_WIDTH - len;

                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_writesb(func, addr, buf, len);
                else
                        ret = sdio_memcpy_toio(func, addr, buf, len);
        } else {
                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
        }

        sdio_release_host(func);

        ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
                   request & HIF_WRITE ? "wr" : "rd", addr,
                   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
        ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

        return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
        struct bus_request *bus_req;

        spin_lock_bh(&ar_sdio->lock);

        if (list_empty(&ar_sdio->bus_req_freeq)) {
                spin_unlock_bh(&ar_sdio->lock);
                return NULL;
        }

        bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
                                   struct bus_request, list);
        list_del(&bus_req->list);

        spin_unlock_bh(&ar_sdio->lock);
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
                                     struct bus_request *bus_req)
{
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
        spin_unlock_bh(&ar_sdio->lock);
}

static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
                                        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i;

        data->blksz = HIF_MBOX_BLOCK_SIZE;
        data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
                   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
                   data->blksz, data->blocks, scat_req->len,
                   scat_req->scat_entries);

        data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
                                                    MMC_DATA_READ;

        /* fill SG entries */
        sg = scat_req->sgentries;
        sg_init_table(sg, scat_req->scat_entries);

        /* assemble SG list */
        for (i = 0; i < scat_req->scat_entries; i++, sg++) {
                ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
                           i, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);

                sg_set_buf(sg, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);
        }

        /* set scatter-gather table for request */
        data->sg = scat_req->sgentries;
        data->sg_len = scat_req->scat_entries;
}

static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
                               struct bus_request *req)
{
        struct mmc_request mmc_req;
        struct mmc_command cmd;
        struct mmc_data data;
        struct hif_scatter_req *scat_req;
        u8 opcode, rw;
        int status, len;

        scat_req = req->scat_req;

        if (scat_req->virt_scat) {
                len = scat_req->len;
                if (scat_req->req & HIF_BLOCK_BASIS)
                        len = round_down(len, HIF_MBOX_BLOCK_SIZE);

                status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
                                        scat_req->addr, scat_req->virt_dma_buf,
                                        len);
                goto scat_complete;
        }

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        ath6kl_sdio_setup_scat_data(scat_req, &data);

        opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
                 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

        rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

        /* Fixup the address so that the last byte will fall on MBOX EOM */
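        /*
         * The mailbox is an address window of HIF_MBOX_WIDTH (or
         * HIF_MBOX0_EXT_WIDTH) bytes whose final byte is the end-of-message
         * marker. Starting the burst at (window end - len) makes the last
         * byte of the transfer land exactly on EOM, which is what signals
         * the target that a complete message has arrived. For example, a
         * 1536-byte write to the base mailbox starts at
         * HIF_MBOX_BASE_ADDR + HIF_MBOX_WIDTH - 1536.
         */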
        if (scat_req->req & HIF_WRITE) {
                if (scat_req->addr == HIF_MBOX_BASE_ADDR)
                        scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
                else
                        /* Uses extended address range */
                        scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
        }

        /* set command argument */
        ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
                                  CMD53_ARG_BLOCK_BASIS, opcode,
                                  scat_req->addr, data.blocks);

        cmd.opcode = SD_IO_RW_EXTENDED;
        cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

        mmc_req.cmd = &cmd;
        mmc_req.data = &data;

        sdio_claim_host(ar_sdio->func);

        mmc_set_data_timeout(&data, ar_sdio->func->card);
        /* synchronous call to process request */
        mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

        sdio_release_host(ar_sdio->func);

        status = cmd.error ? cmd.error : data.error;

scat_complete:
        scat_req->status = status;

        if (scat_req->status)
                ath6kl_err("Scatter request failed: %d\n",
                           scat_req->status);

        if (scat_req->req & HIF_ASYNCHRONOUS)
                scat_req->complete(ar_sdio->ar->htc_target, scat_req);

        return status;
}

static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                                           int n_scat_entry, int n_scat_req,
                                           bool virt_scat)
{
        struct hif_scatter_req *s_req;
        struct bus_request *bus_req;
        int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
        u8 *virt_buf;

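        /*
         * struct hif_scatter_req already embeds one hif_scatter_item in its
         * scat_list[], so only n_scat_entry - 1 extra items need to be
         * tacked onto the allocation. The virtual-scatter buffer is padded
         * by two cache lines so it can be aligned with L1_CACHE_ALIGN()
         * below.
         */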
        scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;

        if (!virt_scat)
                sg_sz = sizeof(struct scatterlist) * n_scat_entry;
        else
                buf_sz = 2 * L1_CACHE_BYTES +
                         ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

        for (i = 0; i < n_scat_req; i++) {
                /* allocate the scatter request */
                s_req = kzalloc(scat_req_sz, GFP_KERNEL);
                if (!s_req)
                        return -ENOMEM;

                if (virt_scat) {
                        virt_buf = kzalloc(buf_sz, GFP_KERNEL);
                        if (!virt_buf) {
                                kfree(s_req);
                                return -ENOMEM;
                        }

                        s_req->virt_dma_buf =
                                (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
                } else {
                        /* allocate sglist */
                        s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

                        if (!s_req->sgentries) {
                                kfree(s_req);
                                return -ENOMEM;
                        }
                }

                /* allocate a bus request for this scatter request */
                bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
                if (!bus_req) {
                        kfree(s_req->sgentries);
                        kfree(s_req->virt_dma_buf);
                        kfree(s_req);
                        return -ENOMEM;
                }

                /* assign the scatter request to this bus request */
                bus_req->scat_req = s_req;
                s_req->busrequest = bus_req;

                s_req->virt_scat = virt_scat;

                /* add it to the scatter pool */
                hif_scatter_req_add(ar_sdio->ar, s_req);
        }

        return 0;
}

static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                                       u32 len, u32 request)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u8 *tbuf = NULL;
        int ret;
        bool bounced = false;

        if (request & HIF_BLOCK_BASIS)
                len = round_down(len, HIF_MBOX_BLOCK_SIZE);

        if (buf_needs_bounce(buf)) {
                if (!ar_sdio->dma_buffer)
                        return -ENOMEM;
                mutex_lock(&ar_sdio->dma_buffer_mutex);
                tbuf = ar_sdio->dma_buffer;

                if (request & HIF_WRITE)
                        memcpy(tbuf, buf, len);

                bounced = true;
        } else
                tbuf = buf;

        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
                memcpy(buf, tbuf, len);

        if (bounced)
                mutex_unlock(&ar_sdio->dma_buffer_mutex);

        return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
{
        if (req->scat_req)
                ath6kl_sdio_scat_rw(ar_sdio, req);
        else {
                void *context;
                int status;

                status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
                                                     req->buffer, req->length,
                                                     req->request);
                context = req->packet;
                ath6kl_sdio_free_bus_req(ar_sdio, req);
                ath6kl_hif_rw_comp_handler(context, status);
        }
}

static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
        struct ath6kl_sdio *ar_sdio;
        struct bus_request *req, *tmp_req;

        ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath6kl_sdio_write_async(ar_sdio, req);
                spin_lock_bh(&ar_sdio->wr_async_lock);
        }
        spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
        int status;
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

        ar_sdio = sdio_get_drvdata(func);
        atomic_set(&ar_sdio->irq_handling, 1);
        /*
         * Release the host during interrupts so we can pick it back up when
         * we process commands.
         */
        sdio_release_host(ar_sdio->func);

        status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
        sdio_claim_host(ar_sdio->func);

        atomic_set(&ar_sdio->irq_handling, 0);
        wake_up(&ar_sdio->irq_wq);

        WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret = 0;

        if (!ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

        sdio_claim_host(func);

        ret = sdio_enable_func(func);
        if (ret) {
                ath6kl_err("Unable to enable sdio func: %d\n", ret);
                sdio_release_host(func);
                return ret;
        }

        sdio_release_host(func);

        /*
         * Wait for hardware to initialise. It should take a lot less than
         * 10 ms but let's be conservative here.
         */
        msleep(10);

        ar_sdio->is_disabled = false;

        return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        if (ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

        /* Disable the card */
        sdio_claim_host(ar_sdio->func);
        ret = sdio_disable_func(ar_sdio->func);
        sdio_release_host(ar_sdio->func);

        if (ret)
                return ret;

        ar_sdio->is_disabled = true;

        return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
                                   u32 length, u32 request,
                                   struct htc_packet *packet)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *bus_req;

        bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

        if (!bus_req)
                return -ENOMEM;

        bus_req->address = address;
        bus_req->buffer = buffer;
        bus_req->length = length;
        bus_req->request = request;
        bus_req->packet = packet;

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
        spin_unlock_bh(&ar_sdio->wr_async_lock);
        queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

        return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        /* Register the isr */
        ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
        if (ret)
                ath6kl_err("Failed to claim sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        return !atomic_read(&ar_sdio->irq_handling);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        if (atomic_read(&ar_sdio->irq_handling)) {
                sdio_release_host(ar_sdio->func);

                ret = wait_event_interruptible(ar_sdio->irq_wq,
                                               ath6kl_sdio_is_on_irq(ar));
                if (ret)
                        return;

                sdio_claim_host(ar_sdio->func);
        }

        ret = sdio_release_irq(ar_sdio->func);
        if (ret)
                ath6kl_err("Failed to release sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *node = NULL;

        spin_lock_bh(&ar_sdio->scat_lock);

        if (!list_empty(&ar_sdio->scat_req)) {
                node = list_first_entry(&ar_sdio->scat_req,
                                        struct hif_scatter_req, list);
                list_del(&node->list);

                node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
        }

        spin_unlock_bh(&ar_sdio->scat_lock);

        return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
                                        struct hif_scatter_req *s_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        spin_lock_bh(&ar_sdio->scat_lock);

        list_add_tail(&s_req->list, &ar_sdio->scat_req);

        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                                        struct hif_scatter_req *scat_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u32 request = scat_req->req;
        int status = 0;

        if (!scat_req->len)
                return -EINVAL;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (request & HIF_SYNCHRONOUS)
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
        else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list,
                              &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
        }

        return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *s_req, *tmp_req;

        /* empty the free list */
        spin_lock_bh(&ar_sdio->scat_lock);
        list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
                list_del(&s_req->list);
                spin_unlock_bh(&ar_sdio->scat_lock);

                /*
                 * FIXME: should we also call completion handler with
                 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
                 * that the packet is properly freed?
                 */
                if (s_req->busrequest)
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                kfree(s_req->virt_dma_buf);
                kfree(s_req->sgentries);
                kfree(s_req);

                spin_lock_bh(&ar_sdio->scat_lock);
        }
        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct htc_target *target = ar->htc_target;
        int ret;
        bool virt_scat = false;

        if (ar_sdio->scatter_enabled)
                return 0;

        ar_sdio->scatter_enabled = true;

        /* check if host supports scatter and it meets our requirements */
        if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
                ath6kl_err("host only supports scatter of %d entries, need: %d\n",
                           ar_sdio->func->card->host->max_segs,
                           MAX_SCATTER_ENTRIES_PER_REQ);
                virt_scat = true;
        }

        if (!virt_scat) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                MAX_SCATTER_ENTRIES_PER_REQ,
                                MAX_SCATTER_REQUESTS, virt_scat);

                if (!ret) {
                        ath6kl_dbg(ATH6KL_DBG_BOOT,
                                   "hif-scatter enabled requests %d entries %d\n",
                                   MAX_SCATTER_REQUESTS,
                                   MAX_SCATTER_ENTRIES_PER_REQ);

                        target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
                        target->max_xfer_szper_scatreq =
                                MAX_SCATTER_REQ_TRANSFER_SIZE;
                } else {
                        ath6kl_sdio_cleanup_scatter(ar);
                        ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
                }
        }

        if (virt_scat || ret) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                ATH6KL_SCATTER_ENTRIES_PER_REQ,
                                ATH6KL_SCATTER_REQS, virt_scat);

                if (ret) {
                        ath6kl_err("failed to alloc virtual scatter resources!\n");
                        ath6kl_sdio_cleanup_scatter(ar);
                        return ret;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "virtual scatter enabled requests %d entries %d\n",
                           ATH6KL_SCATTER_REQS,
                           ATH6KL_SCATTER_ENTRIES_PER_REQ);

                target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
                target->max_xfer_szper_scatreq =
                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
        }

        return 0;
}

static int ath6kl_sdio_config(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
            MANUFACTURER_ID_AR6003_BASE) {
                /* enable 4-bit ASYNC interrupt on AR6003 or later */
                ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
                                        CCCR_SDIO_IRQ_MODE_REG,
                                        SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
                if (ret) {
                        ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
                                   ret);
                        goto out;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
        }

        /* give us some time to enable, in ms */
        func->enable_timeout = 100;

        ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
        if (ret) {
                ath6kl_err("Set sdio block size %d failed: %d\n",
                           HIF_MBOX_BLOCK_SIZE, ret);
                goto out;
        }

out:
        sdio_release_host(func);

        return ret;
}

static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        int ret;

        flags = sdio_get_host_pm_caps(func);

        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

        if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
            !(flags & MMC_PM_KEEP_POWER))
                return -EINVAL;

        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
        if (ret) {
                ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
                return ret;
        }

        /* sdio irq wakes up host */
        ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
        if (ret)
                ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

        return ret;
}
839static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
840{
841 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
842 struct sdio_func *func = ar_sdio->func;
843 mmc_pm_flag_t flags;
Raja Mani1e9a9052012-03-06 15:03:59 +0530844 bool try_deepsleep = false;
Raja Manie390af72012-01-30 17:13:09 +0530845 int ret;
Kalle Valo10509f92011-12-13 14:52:07 +0200846
847 if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
Raja Manie390af72012-01-30 17:13:09 +0530848 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
849
850 ret = ath6kl_set_sdio_pm_caps(ar);
851 if (ret)
852 goto cut_pwr;
853
Kalle Valo10509f92011-12-13 14:52:07 +0200854 ret = ath6kl_cfg80211_suspend(ar,
855 ATH6KL_CFG_SUSPEND_SCHED_SCAN,
856 NULL);
Kalle Valo10509f92011-12-13 14:52:07 +0200857 if (ret)
Raja Manie390af72012-01-30 17:13:09 +0530858 goto cut_pwr;
Kalle Valo10509f92011-12-13 14:52:07 +0200859
Raja Manie390af72012-01-30 17:13:09 +0530860 return 0;
Kalle Valo10509f92011-12-13 14:52:07 +0200861 }
862
Raja Manie390af72012-01-30 17:13:09 +0530863 if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
864 (!ar->suspend_mode && wow)) {
865
866 ret = ath6kl_set_sdio_pm_caps(ar);
867 if (ret)
868 goto cut_pwr;
869
Raja Manid7c44e02011-11-07 22:52:46 +0200870 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
Raja Mani1e9a9052012-03-06 15:03:59 +0530871 if (ret && ret != -ENOTCONN)
872 ath6kl_err("wow suspend failed: %d\n", ret);
Raja Manid7c44e02011-11-07 22:52:46 +0200873
Kalle Valo7433a492012-03-12 13:23:14 +0200874 if (ret &&
875 (!ar->wow_suspend_mode ||
876 ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
877 try_deepsleep = true;
Raja Mani1e9a9052012-03-06 15:03:59 +0530878 else if (ret &&
879 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
Kalle Valo7433a492012-03-12 13:23:14 +0200880 goto cut_pwr;
Raja Mani1e9a9052012-03-06 15:03:59 +0530881 if (!ret)
882 return 0;
Raja Manid7c44e02011-11-07 22:52:46 +0200883 }
884
Raja Manie390af72012-01-30 17:13:09 +0530885 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
Raja Mani1e9a9052012-03-06 15:03:59 +0530886 !ar->suspend_mode || try_deepsleep) {
Raja Manie390af72012-01-30 17:13:09 +0530887
888 flags = sdio_get_host_pm_caps(func);
889 if (!(flags & MMC_PM_KEEP_POWER))
890 goto cut_pwr;
891
892 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
893 if (ret)
894 goto cut_pwr;
895
Santosh Sajjancca4d5a2012-01-30 22:02:26 +0200896 /*
897 * Workaround to support Deep Sleep with MSM, set the host pm
898 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC deiver to disable
899 * the sdc2_clock and internally allows MSM to enter
900 * TCXO shutdown properly.
901 */
902 if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
903 ret = sdio_set_host_pm_flags(func,
904 MMC_PM_WAKE_SDIO_IRQ);
905 if (ret)
906 goto cut_pwr;
907 }
908
Raja Manie390af72012-01-30 17:13:09 +0530909 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
910 NULL);
911 if (ret)
912 goto cut_pwr;
913
914 return 0;
915 }
916
917cut_pwr:
918 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
Kalle Valoabcb3442011-07-22 08:26:20 +0300919}
920
Chilam Ngaa6cffc2011-10-05 10:12:52 +0300921static int ath6kl_sdio_resume(struct ath6kl *ar)
922{
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200923 switch (ar->state) {
924 case ATH6KL_STATE_OFF:
925 case ATH6KL_STATE_CUTPOWER:
926 ath6kl_dbg(ATH6KL_DBG_SUSPEND,
927 "sdio resume configuring sdio\n");
928
929 /* need to set sdio settings after power is cut from sdio */
930 ath6kl_sdio_config(ar);
931 break;
932
933 case ATH6KL_STATE_ON:
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200934 break;
935
936 case ATH6KL_STATE_DEEPSLEEP:
937 break;
Raja Manid7c44e02011-11-07 22:52:46 +0200938
939 case ATH6KL_STATE_WOW:
940 break;
Raja Mani390a8c82012-03-07 11:35:04 +0530941
Kalle Valo10509f92011-12-13 14:52:07 +0200942 case ATH6KL_STATE_SCHED_SCAN:
943 break;
Raja Mani390a8c82012-03-07 11:35:04 +0530944
945 case ATH6KL_STATE_SUSPENDING:
946 break;
947
948 case ATH6KL_STATE_RESUMING:
949 break;
Kalle Valob4b2a0b2011-11-01 08:44:44 +0200950 }
951
Kalle Valo52d81a62011-11-01 08:44:21 +0200952 ath6kl_cfg80211_resume(ar);
Chilam Ngaa6cffc2011-10-05 10:12:52 +0300953
954 return 0;
955}

/* set the window address register (using 4-byte register access). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
        int status;
        u8 addr_val[4];
        s32 i;

        /*
         * Write bytes 1, 2 and 3 of the register to set the upper address
         * bytes; the LSB is written last to initiate the access cycle.
         */
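        /*
         * Worked example (little-endian host): for addr 0x12345678 this
         * loop issues fixed-address writes that hit reg_addr + 1 four times
         * with 0x56, reg_addr + 2 four times with 0x34, and reg_addr + 3
         * four times with 0x12; repeating the same byte is harmless and
         * keeps every HIF access 4 bytes long. The LSB (0x78) is only
         * written by the full 4-byte write below, which is what actually
         * triggers the window cycle.
         */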

        for (i = 1; i <= 3; i++) {
                /*
                 * Fill the buffer with the address byte value we want to
                 * hit 4 times.
                 */
                memset(addr_val, ((u8 *)&addr)[i], 4);

                /*
                 * Hit each byte of the register address with a 4-byte
                 * write operation to the same address, this is a harmless
                 * operation.
                 */
                status = ath6kl_sdio_read_write_sync(ar, reg_addr + i,
                                                     addr_val, 4,
                                                     HIF_WR_SYNC_BYTE_FIX);
                if (status)
                        break;
        }

        if (status) {
                ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        /*
         * Write the address register again, this time write the whole
         * 4-byte value. The effect here is that the LSB write causes the
         * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
         * effect since we are writing the same values again
         */
        status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
                                             4, HIF_WR_SYNC_BYTE_INC);

        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        return 0;
}

static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
        int status;

        /* set window register to start read cycle */
        status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
                                        address);

        if (status)
                return status;

        /* read the data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *)data, sizeof(u32),
                                             HIF_RD_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to read from window data addr\n",
                           __func__);
                return status;
        }

        return status;
}

static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
                                    __le32 data)
{
        int status;
        u32 val = (__force u32) data;

        /* set write data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *) &val, sizeof(u32),
                                             HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window data addr\n",
                           __func__, val);
                return status;
        }

        /* set window register, which starts the write cycle */
        return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
                                      address);
}

static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
        u32 addr;
        unsigned long timeout;
        int ret;

        ar->bmi.cmd_credits = 0;

        /* Read the counter register to get the command credits */
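        /*
         * The decrementing counters at COUNT_DEC_ADDRESS are laid out 4
         * bytes apart; index (HTC_MAILBOX_NUM_MAX + ENDPOINT1) selects the
         * one the target uses to post BMI command credits.
         */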
        addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
                /*
                 * Hit the credit counter with a 4-byte access, the first
                 * byte read will hit the counter and cause a decrement,
                 * while the remaining 3 bytes have no effect. The rationale
                 * behind this is to make all HIF accesses 4-byte aligned.
                 */
                ret = ath6kl_sdio_read_write_sync(ar, addr,
                                                  (u8 *)&ar->bmi.cmd_credits,
                                                  4, HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("Unable to decrement the command credit count register: %d\n",
                                   ret);
                        return ret;
                }

                /*
                 * The counter is only 8 bits; ignore anything in the
                 * upper 3 bytes.
                 */
                ar->bmi.cmd_credits &= 0xFF;
        }

        if (!ar->bmi.cmd_credits) {
                ath6kl_err("bmi communication timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
        unsigned long timeout;
        u32 rx_word = 0;
        int ret = 0;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while ((time_before(jiffies, timeout)) && !rx_word) {
                ret = ath6kl_sdio_read_write_sync(ar,
                                                  RX_LOOKAHEAD_VALID_ADDRESS,
                                                  (u8 *)&rx_word,
                                                  sizeof(rx_word),
                                                  HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
                        return ret;
                }

                /* all we really want is one bit */
                rx_word &= (1 << ENDPOINT1);
        }

        if (!rx_word) {
                ath6kl_err("bmi_recv_buf FIFO empty\n");
                return -EINVAL;
        }

        return ret;
}

static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        ret = ath6kl_sdio_bmi_credits(ar);
        if (ret)
                return ret;

        addr = ar->mbox_info.htc_addr;

        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_WR_SYNC_BYTE_INC);
        if (ret)
                ath6kl_err("unable to send the bmi data to the device\n");

        return ret;
}

static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        /*
         * During normal bootup, small reads may be required.
         * Rather than issue an HIF Read and then wait as the Target
         * adds successive bytes to the FIFO, we wait here until
         * we know that response data is available.
         *
         * This allows us to cleanly timeout on an unexpected
         * Target failure rather than risk problems at the HIF level.
         * In particular, this avoids SDIO timeouts and possibly garbage
         * data on some host controllers. And on an interconnect
         * such as Compact Flash (as well as some SDIO masters) which
         * does not provide any indication on data timeout, it avoids
         * a potential hang or garbage response.
         *
         * Synchronization is more difficult for reads larger than the
         * size of the MBOX FIFO (128B), because the Target is unable
         * to push the 129th byte of data until AFTER the Host posts an
         * HIF Read and removes some FIFO data. So for large reads the
         * Host proceeds to post an HIF Read BEFORE all the data is
         * actually available to read. Fortunately, large BMI reads do
         * not occur in practice -- they're supported for debug/development.
         *
         * So Host/Target BMI synchronization is divided into these cases:
         * CASE 1: length < 4
         *         Should not happen
         *
         * CASE 2: 4 <= length <= 128
         *         Wait for first 4 bytes to be in FIFO
         *         If CONSERVATIVE_BMI_READ is enabled, also wait for
         *         a BMI command credit, which indicates that the ENTIRE
         *         response is available in the FIFO
         *
         * CASE 3: length > 128
         *         Wait for the first 4 bytes to be in FIFO
         *
         * For most uses, a small timeout should be sufficient and we will
         * usually see a response quickly; but there may be some unusual
         * (debug) cases of BMI_EXECUTE where we want a larger timeout.
         * For now, we use an unbounded busy loop while waiting for
         * BMI_EXECUTE.
         *
         * If BMI_EXECUTE ever needs to support longer-latency execution,
         * especially in production, this code needs to be enhanced to sleep
         * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
         * a function of Host processor speed.
         */
        if (len >= 4) { /* NB: Currently, always true */
                ret = ath6kl_bmi_get_rx_lkahd(ar);
                if (ret)
                        return ret;
        }

        addr = ar->mbox_info.htc_addr;
        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_RD_SYNC_BYTE_INC);
        if (ret) {
                ath6kl_err("Unable to read the bmi data from the device: %d\n",
                           ret);
                return ret;
        }

        return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *req, *tmp_req;
        void *context;

        /* FIXME: make sure that wq is not queued again */

        cancel_work_sync(&ar_sdio->wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);

        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);

                if (req->scat_req) {
                        /* this is a scatter gather request */
                        req->scat_req->status = -ECANCELED;
                        req->scat_req->complete(ar_sdio->ar->htc_target,
                                                req->scat_req);
                } else {
                        context = req->packet;
                        ath6kl_sdio_free_bus_req(ar_sdio, req);
                        ath6kl_hif_rw_comp_handler(context, -ECANCELED);
                }
        }

        spin_unlock_bh(&ar_sdio->wr_async_lock);

        WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .read_write_sync = ath6kl_sdio_read_write_sync,
        .write_async = ath6kl_sdio_write_async,
        .irq_enable = ath6kl_sdio_irq_enable,
        .irq_disable = ath6kl_sdio_irq_disable,
        .scatter_req_get = ath6kl_sdio_scatter_req_get,
        .scatter_req_add = ath6kl_sdio_scatter_req_add,
        .enable_scatter = ath6kl_sdio_enable_scatter,
        .scat_req_rw = ath6kl_sdio_async_rw_scatter,
        .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
        .suspend = ath6kl_sdio_suspend,
        .resume = ath6kl_sdio_resume,
        .diag_read32 = ath6kl_sdio_diag_read32,
        .diag_write32 = ath6kl_sdio_diag_write32,
        .bmi_read = ath6kl_sdio_bmi_read,
        .bmi_write = ath6kl_sdio_bmi_write,
        .power_on = ath6kl_sdio_power_on,
        .power_off = ath6kl_sdio_power_off,
        .stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

        return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

        return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
                         ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath6kl_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
{
        int ret;
        struct ath6kl_sdio *ar_sdio;
        struct ath6kl *ar;
        int count;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->max_blksize, func->cur_blksize);

        ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
        if (!ar_sdio)
                return -ENOMEM;

        ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!ar_sdio->dma_buffer) {
                ret = -ENOMEM;
                goto err_hif;
        }

        ar_sdio->func = func;
        sdio_set_drvdata(func, ar_sdio);

        ar_sdio->id = id;
        ar_sdio->is_disabled = true;

        spin_lock_init(&ar_sdio->lock);
        spin_lock_init(&ar_sdio->scat_lock);
        spin_lock_init(&ar_sdio->wr_async_lock);
        mutex_init(&ar_sdio->dma_buffer_mutex);

        INIT_LIST_HEAD(&ar_sdio->scat_req);
        INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
        INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

        INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

        init_waitqueue_head(&ar_sdio->irq_wq);

        for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
                ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

        ar = ath6kl_core_create(&ar_sdio->func->dev);
        if (!ar) {
                ath6kl_err("Failed to alloc ath6kl core\n");
                ret = -ENOMEM;
                goto err_dma;
        }

        ar_sdio->ar = ar;
        ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
        ar->hif_priv = ar_sdio;
        ar->hif_ops = &ath6kl_sdio_ops;
        ar->bmi.max_data_size = 256;

        ath6kl_sdio_set_mbox_info(ar);

        ret = ath6kl_sdio_config(ar);
        if (ret) {
                ath6kl_err("Failed to config sdio: %d\n", ret);
                goto err_core_alloc;
        }

        ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
        if (ret) {
                ath6kl_err("Failed to init ath6kl core\n");
                goto err_core_alloc;
        }

        return ret;

err_core_alloc:
        ath6kl_core_destroy(ar_sdio->ar);
err_dma:
        kfree(ar_sdio->dma_buffer);
err_hif:
        kfree(ar_sdio);

        return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio removed func %d vendor 0x%x device 0x%x\n",
                   func->num, func->vendor, func->device);

        ar_sdio = sdio_get_drvdata(func);

        ath6kl_stop_txrx(ar_sdio->ar);
        cancel_work_sync(&ar_sdio->wr_async_work);

        ath6kl_core_cleanup(ar_sdio->ar);
        ath6kl_core_destroy(ar_sdio->ar);

        kfree(ar_sdio->dma_buffer);
        kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
        {},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
        .name = "ath6kl_sdio",
        .id_table = ath6kl_sdio_devices,
        .probe = ath6kl_sdio_probe,
        .remove = ath6kl_sdio_remove,
        .drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
        int ret;

        ret = sdio_register_driver(&ath6kl_sdio_driver);
        if (ret)
                ath6kl_err("sdio driver registration failed: %d\n", ret);

        return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
        sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);