/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rtsx_pci.h>
#include <asm/unaligned.h>

#include "rtsx_pcr.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
	[RTSX_MS_CARD] = {
		.name = DRV_NAME_RTSX_PCI_MS,
	},
};

static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);

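/*
 * Internal chip registers are accessed indirectly through RTSX_HAIMR:
 * pack the register address, mask and data into one 32-bit word, write it
 * to HAIMR, then poll until the HAIMR_TRANS_END bit is cleared by hardware.
 * The low byte read back must match the data written, otherwise -EIO.
 */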
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

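/*
 * Read an internal register through RTSX_HAIMR: issue a read request and
 * poll until HAIMR_TRANS_END clears; the result is in the low byte.
 */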
int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);

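/*
 * PHY registers are written through the host command queue: load the 16-bit
 * value into PHYDATA0/1 and the address into PHYADDR, trigger the write via
 * PHYRWCTL, then poll bit 7 of PHYRWCTL until it clears.
 */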
int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

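/*
 * Read a PHY register: request the read via PHYADDR/PHYRWCTL, poll bit 7 of
 * PHYRWCTL until the transaction finishes, then fetch the 16-bit result from
 * PHYDATA0/1 with a second command batch.
 */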
int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 *ptr, tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	ptr = rtsx_pci_get_cmd_data(pcr);
	data = ((u16)ptr[1] << 8) | ptr[0];

	if (val)
		*val = data;

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);

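/*
 * Abort whatever the command and data engines are doing: issue
 * STOP_CMD/STOP_DMA, then set the top bit of DMACTL and RBCTL to reset the
 * DMA engine and flush the buffer logic.
 */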
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);

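/*
 * Queue one register access command in the host command buffer. Each entry
 * is a little-endian 32-bit word: bits [31:30] command type, [29:16] register
 * address, [15:8] mask, [7:0] data. pcr->ci indexes the next free slot.
 */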
void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);

void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

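/*
 * Kick the queued host commands and wait for the transfer-done interrupt.
 * The interrupt handler records the outcome in pcr->trans_result and
 * completes pcr->done; on timeout or failure the engines are stopped.
 */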
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
				__func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);

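/*
 * Append one entry to the scatter-gather descriptor table used for ADMA
 * transfers: a little-endian 64-bit word holding the DMA address, length and
 * option flags (SG_VALID, SG_TRANS_DATA, plus SG_END on the last entry).
 */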
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = SG_VALID | SG_TRANS_DATA;

	dev_dbg(&(pcr->pci->dev), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);

	if (end)
		option |= SG_END;
	val = ((u64)addr << 32) | ((u64)len << 12) | option;

	put_unaligned_le64(val, ptr);
	ptr++;
	pcr->sgi++;
}

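/*
 * DMA a scatterlist to or from the card: map the sglist, build the
 * descriptor table, program RTSX_HDBAR/HDBCTLR, then wait (with timeout)
 * for the interrupt handler to report the result.
 */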
int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	struct completion trans_done;
	u8 dir;
	int err = 0, i, count;
	long timeleft;
	unsigned long flags;
	struct scatterlist *sg;
	enum dma_data_direction dma_dir;
	u32 val;
	dma_addr_t addr;
	unsigned int len;

	dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);

	/* don't transfer data during abort processing */
	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	if (read) {
		dir = DEVICE_TO_HOST;
		dma_dir = DMA_FROM_DEVICE;
	} else {
		dir = HOST_TO_DEVICE;
		dma_dir = DMA_TO_DEVICE;
	}

	count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
	if (count < 1) {
		dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
		return -EINVAL;
	}
	dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
				__func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);

	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;

	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

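/*
 * Read up to 512 bytes from the ping-pong buffer starting at PPBUF_BASE2,
 * in batches of at most 256 READ_REG_CMD entries per command run.
 */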
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

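/*
 * Program a pull-control table: each entry packs a register address in the
 * upper 16 bits and the value to write in the low byte; an entry with a zero
 * address terminates the table.
 */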
static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	int err;

	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	return 0;
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	dev_dbg(&(pcr->pci->dev), "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}

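/*
 * Switch the card clock: program the SSC clock generator (divider N, post
 * divider and SSC depth) so that the divided-down output matches the
 * requested card clock. In initial mode the SD clock is additionally
 * divided by 128, giving roughly 250 kHz for card identification.
 */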
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 N, min_N, max_N, clk_divider;
	u8 mcu_cnt, div, max_div;
	u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (initial_mode) {
		/* We use 250k(around) here, in initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	card_clock /= 1000000;
	dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock);

	min_N = 80;
	max_N = 208;
	max_div = CLK_DIV_8;

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	dev_dbg(&(pcr->pci->dev),
			"Internal SSC clock: %dMHz (cur_clock = %d)\n",
			clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	N = (u8)(clk - 2);
	if ((clk <= 2) || (N > max_N))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is equal or greater than min_N */
	div = CLK_DIV_1;
	while ((N < min_N) && (div < max_div)) {
		N = (N + 2) * 2 - 2;
		div++;
	}
	dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	dev_dbg(&(pcr->pci->dev), "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(10);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

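/*
 * Delayed work scheduled from the interrupt handler: read the current
 * interrupt status, apply the optional card-detect deglitch hook, and notify
 * the SD/MS slot drivers of insertion or removal events.
 */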
static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);

	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);

	if (pcr->card_inserted || pcr->card_removed) {
		dev_dbg(&(pcr->pci->dev),
				"card_inserted: 0x%x, card_removed: 0x%x\n",
				pcr->card_inserted, pcr->card_removed);

		if (pcr->ops->cd_deglitch)
			pcr->card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = pcr->card_inserted | pcr->card_removed;
		pcr->card_inserted = 0;
		pcr->card_removed = 0;
	}

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_detect & SD_EXIST)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if (card_detect & MS_EXIST)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

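/*
 * Interrupt handler: read and clear RTSX_BIPR, record SD/MS card insertion
 * and removal, schedule the card-detect work, and complete any transfer
 * waiting on TRANS_OK_INT / TRANS_FAIL_INT / DELINK_INT.
 */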
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (pcr->card_inserted || pcr->card_removed)
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	dev_info(&(pcr->pci->dev), "%s: pcr->msi_en = %d, pci->irq = %d\n",
			__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

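/*
 * Bring the chip to a known state: program the command buffer address,
 * enable bus interrupts, power up the SSC clock, then issue one batch of
 * register writes configuring clocking, link state and interrupt behaviour.
 */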
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset ASPM state to default value */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
			0x07, DRIVER_TYPE_D);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
	/* Force CLKREQ# PIN to drive 0 to request clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	/* Enable clk_request_n to enable clock power management */
	rtsx_pci_write_config_byte(pcr, 0x81, 1);
	/* Enter L1 when host tx idle */
	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	return 0;
}

static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;
	}

	dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

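/*
 * PCI probe: map BAR 0, allocate the coherent buffer shared by the host
 * command queue and the scatter-gather table, register the interrupt handler
 * (MSI if available), initialize the chip and add the SD/MS MFD cells.
 */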
static int __devinit rtsx_pci_probe(struct pci_dev *pcidev,
		const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_handle;
	}

	spin_lock(&rtsx_pci_lock);
	ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
	spin_unlock(&rtsx_pci_lock);
	if (ret)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	len = pci_resource_len(pcidev, 0);
	base = pci_resource_start(pcidev, 0);
	pcr->remap_addr = ioremap_nocache(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_host;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret < 0)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto free_dma;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

disable_irq:
	free_irq(pcr->irq, (void *)pcr);
free_dma:
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_host:
	dev_set_drvdata(&pcidev->dev, NULL);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void __devexit rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	pcr->remove_pci = true;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	dev_set_drvdata(&pcidev->dev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x02);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

#else /* CONFIG_PM */

#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL

#endif /* CONFIG_PM */

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = __devexit_p(rtsx_pci_remove),
	.suspend = rtsx_pci_suspend,
	.resume = rtsx_pci_resume,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");