/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};

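/*
 * CMD53 (IO_RW_EXTENDED) argument field values, as defined by the SDIO
 * specification: a transfer is either a read or a write, moves data on a
 * byte or block basis, and targets either a fixed (FIFO) register address
 * or an incrementing address range.
 */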
#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Check whether a buffer is word-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA-able and will bug-check otherwise
 * (e.g. buffers on the stack). virt_addr_valid() fails for stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

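/*
 * Pack a CMD53 argument. Bit layout per the SDIO specification:
 *
 *	[31]	R/W flag (1 = write)
 *	[30:28]	function number
 *	[27]	block mode (1 = block basis)
 *	[26]	op code (1 = incrementing address, 0 = fixed)
 *	[25:9]	register address
 *	[8:0]	byte/block count
 *
 * For example, a block-basis, incrementing-address write of 4 blocks to
 * address 0x800 on function 1 yields the argument 0x9C100004.
 */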
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

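/*
 * Write one byte to function 0 (CCCR space) with a raw CMD52. This goes
 * through mmc_wait_for_cmd() rather than the sdio_f0_writeb() helper,
 * since the core helper normally rejects writes to the standard CCCR
 * registers; it is used below to switch the card's interrupt mode.
 */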
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->lock, flag);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_irqrestore(&ar_sdio->lock, flag);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_irqrestore(&ar_sdio->lock, flag);
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	unsigned long flag;

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	spin_lock_irqsave(&ar_sdio->lock, flag);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_irqrestore(&ar_sdio->lock, flag);
}

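/*
 * Translate a HIF scatter request into the mmc_data descriptor for a
 * single CMD53: one scatterlist entry per scatter buffer, transferred on
 * a block basis in HIF_MBOX_BLOCK_SIZE units.
 */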
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
			/*
			 * Some scatter engines can handle unaligned
			 * buffers, print this as informational only.
			 */
			ath6kl_dbg(ATH6KL_DBG_SCATTER,
				   "(%s) scatter buffer is unaligned 0x%p\n",
				   scat_req->req & HIF_WRITE ? "WR" : "RD",
				   scat_req->scat_list[i].buf);

		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

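/*
 * Issue an entire scatter request as one CMD53. The mmc_request is built
 * by hand (rather than via the sdio_io helpers) so that the whole
 * scatterlist goes out in a single bus transaction. The caller must
 * already hold the host claim: the synchronous path claims it around this
 * call, and the asynchronous path runs from the write-async worker, which
 * claims it for the whole batch.
 */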
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status;

	scat_req = req->scat_req;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode,
				  scat_req->addr, data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	status = cmd.error ? cmd.error : data.error;
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter read/write request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

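/*
 * Pre-allocate a pool of n_scat_req scatter requests and hand them to the
 * HIF scatter pool. Each request is sized for n_scat_entry scatter items;
 * the allocation below adds only n_scat_entry - 1 items because the
 * request struct itself already carries the first scat_list entry. For
 * virtual (bounce-buffer) scatter no scatterlist is needed, so sg_sz
 * stays zero.
 */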
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz = 0;

	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (sg_sz) {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

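/*
 * Synchronous byte/block I/O. Buffers that are unaligned or not DMA-able
 * are bounced through the pre-allocated dma_buffer. For writes into a
 * mailbox the address is advanced so that the last byte written lands on
 * the mailbox end-of-message (EOM) boundary, which is what signals the
 * target that a complete message has arrived.
 */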
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		/* guard against overrunning the bounce buffer */
		if (WARN_ON(len > HIF_DMA_BUFFER_SIZE))
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else {
		tbuf = buf;
	}

	sdio_claim_host(ar_sdio->func);
	if (request & HIF_WRITE) {
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(ar_sdio->func, addr, tbuf, len);
		else
			ret = sdio_memcpy_toio(ar_sdio->func, addr, tbuf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(ar_sdio->func, tbuf, addr, len);
		else
			ret = sdio_memcpy_fromio(ar_sdio->func, tbuf,
						 addr, len);
		if (bounced)
			memcpy(buf, tbuf, len);
	}
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kldev_rw_comp_handler(context, status);
	}
}

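/*
 * Worker that drains the write-async queue. The queue lock is dropped
 * around each request because completing a request may queue further
 * work; the host is claimed once for the whole batch.
 */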
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	unsigned long flags;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
	sdio_claim_host(ar_sdio->func);

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	}
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);

	sdio_release_host(ar_sdio->func);
}

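/*
 * SDIO interrupt handler, called by the MMC core with the host already
 * claimed. The irq_handling flag lets ath6kl_sdio_irq_disable() wait for
 * a handler that is still running before releasing the IRQ.
 */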
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
{
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
{
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;
	unsigned long flags;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Mask our function IRQ */
	while (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);
		/*
		 * A bare schedule_timeout() without setting the task state
		 * would return immediately; sleep instead so that a handler
		 * still running gets a chance to finish.
		 */
		msleep(100);
		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);
	}

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

	return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;
	unsigned long flags;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		sdio_claim_host(ar_sdio->func);
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
		sdio_release_host(ar_sdio->func);
	} else {
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;
	unsigned long flag;

	/* empty the free list */
	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	}
	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
				      struct hif_dev_scat_sup_info *pinfo)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret = 0;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("hif-scatter: host only supports scatter of %d entries, need %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_ANY,
		   "hif-scatter enabled: max scatter req: %d entries: %d\n",
		   MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ);

	ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
					      MAX_SCATTER_ENTRIES_PER_REQ,
					      MAX_SCATTER_REQUESTS, false);
	if (ret) {
		ath6kl_err("hif-scatter: failed to alloc scatter resources!\n");
		ath6kl_sdio_cleanup_scatter(ar);
		return ret;
	}

	pinfo->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
	pinfo->max_xfer_szper_scatreq = MAX_SCATTER_REQ_TRANSFER_SIZE;

	return 0;
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
};

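/*
 * Probe: set up driver state and the bus request pool, enable 4-bit async
 * interrupt mode on AR6003-class cards, power the function on and set the
 * mailbox block size, then hand the device over to ath6kl_core_init().
 */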
static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
		   __func__, func->num, func->vendor,
		   func->device, func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_alloc(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;

	ath6kl_sdio_set_mbox_info(ar);

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			sdio_release_host(func);
			goto err_dma;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	sdio_release_host(func);

	ret = ath6kl_sdio_power_on(ar_sdio);
	if (ret)
		goto err_dma;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		sdio_release_host(func);
		goto err_off;
	}

	sdio_release_host(func);

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_off;
	}

	return ret;

err_off:
	ath6kl_sdio_power_off(ar_sdio);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);