/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)  Update the *index and *offset variables so that
 * the next copy will pick up from where this one left off.
 */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *)scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *)scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

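/* Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue.
 */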
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}


/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto handle_errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto handle_errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
handle_errors:
	return;
}

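/* Queue one host command.  Each command is packed into a 32-bit word:
 * bits 31-30 hold the command type, bits 29-16 the register address,
 * bits 15-8 the mask and bits 7-0 the data byte.
 */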
void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

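/* Kick off execution of the queued command buffer without waiting for
 * completion.  HCBAR is pointed at the buffer; HCBCTLR gets the buffer
 * length in bytes plus the start (bit 31) and auto-response (bit 30) flags.
 */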
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = 1U << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

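/* Execute the queued command buffer and sleep until the hardware signals
 * completion or the timeout (in milliseconds) expires.  A typical call
 * sequence (a sketch; helper and register names as defined elsewhere in
 * this driver, e.g. rtsx_chip.h):
 *
 *	rtsx_init_cmd(chip);
 *	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_STOP, SD_STOP, SD_STOP);
 *	retval = rtsx_send_cmd(chip, SD_CARD, 100);
 */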
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1U << 31;
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

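/* Append one entry to the host scatter-gather table.  Each entry is a
 * 64-bit word: the DMA address in the upper 32 bits, the segment length
 * in bits 31-12 and the option flags in the low bits.  Segments larger
 * than 0x80000 bytes are split into chunks, and SG_END is kept only on
 * the final chunk.
 */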
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

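/* Transfer up to size bytes of a scatter-gather list via ADMA, starting
 * at entry *index, byte *offset within that entry.  Both are updated on
 * return so that the next call resumes where this one stopped.
 */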
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be sg + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			    (unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		else
			option = SG_VALID | SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

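/* Transfer a complete scatter-gather list via ADMA.  Lists with more
 * entries than the host table holds (HOST_SG_TBL_BUF_LEN / 8) are sent
 * in batches, waiting for each batch to finish before queueing the next.
 */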
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				    (unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			else
				option = SG_VALID | SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

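/* Transfer a single contiguous buffer: map it for DMA and program the
 * host data buffer address and control registers directly.
 */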
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
		size_t len, enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = 1U << 31;
	long timeleft;

	if ((buf == NULL) || (len == 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

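/* Transfer data, possibly resuming mid-list: scatter-gather requests go
 * through the partial ADMA path, everything else through the plain
 * buffer path.
 */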
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
	void *buf, size_t len, int use_sg, unsigned int *index,
	unsigned int *offset, enum dma_data_direction dma_dir,
	int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

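/* Transfer a complete request.  On failure after a delink event, mark
 * all card slots for re-initialization.
 */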
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
	int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}