blob: cb271167b4a51527586fddd07737c61fcd0c6197 [file] [log] [blame]
/*
 * Copyright © 2009 - Maxim Levitsky
 * driver for Ricoh xD readers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/jiffies.h>
13#include <linux/workqueue.h>
14#include <linux/interrupt.h>
15#include <linux/pci_ids.h>
16#include <asm/byteorder.h>
17#include <linux/sched.h>
18#include "sm_common.h"
19#include "r852.h"
20
21
22static int enable_dma = 1;
23module_param(enable_dma, bool, S_IRUGO);
24MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");
25
/* Verbosity for the dbg()/dbg_verbose() helpers (0-2); writable at
   runtime through sysfs (S_IWUSR). */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
29
30/* read register */
31static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
32{
33 uint8_t reg = readb(dev->mmio + address);
34 return reg;
35}
36
37/* write register */
38static inline void r852_write_reg(struct r852_device *dev,
39 int address, uint8_t value)
40{
41 writeb(value, dev->mmio + address);
42 mmiowb();
43}
44
45
/* read dword sized register */
/* NOTE(review): readl() already converts from little-endian to CPU
   order, so the extra le32_to_cpu() double-swaps on big-endian hosts.
   Harmless on x86 (both are no-ops there) — confirm before building
   for BE. */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
	return reg;
}
52
/* write dword sized register */
/* NOTE(review): writel() already performs the CPU to little-endian
   conversion; the cpu_to_le32() here is redundant and would
   double-swap on big-endian hosts (no-op on x86) — confirm. */
static inline void r852_write_reg_dword(struct r852_device *dev,
					int address, uint32_t value)
{
	writel(cpu_to_le32(value), dev->mmio + address);
	mmiowb();
}
60
61/* returns pointer to our private structure */
62static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
63{
64 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
65 return (struct r852_device *)chip->priv;
66}
67
68
69/* check if controller supports dma */
70static void r852_dma_test(struct r852_device *dev)
71{
72 dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
73 (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
74
75 if (!dev->dma_usable)
76 message("Non dma capable device detected, dma disabled");
77
78 if (!enable_dma) {
79 message("disabling dma on user request");
80 dev->dma_usable = 0;
81 }
82}
83
84/*
85 * Enable dma. Enables ether first or second stage of the DMA,
86 * Expects dev->dma_dir and dev->dma_state be set
87 */
88static void r852_dma_enable(struct r852_device *dev)
89{
90 uint8_t dma_reg, dma_irq_reg;
91
92 /* Set up dma settings */
93 dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
94 dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
95
96 if (dev->dma_dir)
97 dma_reg |= R852_DMA_READ;
98
Maxim Levitskyfb45d322010-02-27 02:04:02 +020099 if (dev->dma_state == DMA_INTERNAL) {
Maxim Levitsky67e054e2010-02-22 20:39:42 +0200100 dma_reg |= R852_DMA_INTERNAL;
Maxim Levitskyfb45d322010-02-27 02:04:02 +0200101 /* Precaution to make sure HW doesn't write */
102 /* to random kernel memory */
103 r852_write_reg_dword(dev, R852_DMA_ADDR,
104 cpu_to_le32(dev->phys_bounce_buffer));
105 } else {
Maxim Levitsky67e054e2010-02-22 20:39:42 +0200106 dma_reg |= R852_DMA_MEMORY;
107 r852_write_reg_dword(dev, R852_DMA_ADDR,
108 cpu_to_le32(dev->phys_dma_addr));
109 }
110
Maxim Levitskyfb45d322010-02-27 02:04:02 +0200111 /* Precaution: make sure write reached the device */
112 r852_read_reg_dword(dev, R852_DMA_ADDR);
113
Maxim Levitsky67e054e2010-02-22 20:39:42 +0200114 r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
115
116 /* Set dma irq */
117 dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
118 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
119 dma_irq_reg |
120 R852_DMA_IRQ_INTERNAL |
121 R852_DMA_IRQ_ERROR |
122 R852_DMA_IRQ_MEMORY);
123}
124
/*
 * Disable dma, called from the interrupt handler, which specifies
 * success of the operation via 'error' argument
 */
static void r852_dma_done(struct r852_device *dev, int error)
{
	/* should only be reached while a transfer is in flight */
	WARN_ON(dev->dma_stage == 0);

	/* ack every pending DMA interrupt by writing the status back */
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);

	/* Precaution to make sure HW doesn't write to random kernel memory */
	r852_write_reg_dword(dev, R852_DMA_ADDR,
		cpu_to_le32(dev->phys_bounce_buffer));
	/* read back flushes the posted write */
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	dev->dma_error = error;
	dev->dma_stage = 0;

	/* unmap unless the bounce buffer (never mapped) was used */
	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
		pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
			dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
	complete(&dev->dma_done);
}
152
153/*
154 * Wait, till dma is done, which includes both phases of it
155 */
156static int r852_dma_wait(struct r852_device *dev)
157{
158 long timeout = wait_for_completion_timeout(&dev->dma_done,
159 msecs_to_jiffies(1000));
160 if (!timeout) {
161 dbg("timeout waiting for DMA interrupt");
162 return -ETIMEDOUT;
163 }
164
165 return 0;
166}
167
/*
 * Read/Write one page using dma. Only pages can be read (512 bytes)
 */
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
	int bounce = 0;
	unsigned long flags;
	int error;

	dev->dma_error = 0;

	/* Set dma direction */
	dev->dma_dir = do_read;
	dev->dma_stage = 1;

	dbg_verbose("doing dma %s ", do_read ? "read" : "write");

	/* Set initial dma state: for reading first fill on board buffer,
	  from device, for writes first fill the buffer from memory */
	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;

	/* if incoming buffer is not page aligned, we should do bounce */
	if ((unsigned long)buf & (R852_DMA_LEN-1))
		bounce = 1;

	if (!bounce) {
		dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
			R852_DMA_LEN,
			(do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));

		/* NOTE(review): mapping failure is normally tested with
		   pci_dma_mapping_error(); DMA_ERROR_CODE is not defined
		   on every architecture — confirm portability. */
		if (dev->phys_dma_addr == DMA_ERROR_CODE)
			bounce = 1;
	}

	if (bounce) {
		dbg_verbose("dma: using bounce buffer");
		dev->phys_dma_addr = dev->phys_bounce_buffer;
		/* writes must be staged into the bounce buffer first */
		if (!do_read)
			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
	}

	/* Enable DMA */
	spin_lock_irqsave(&dev->irqlock, flags);
	r852_dma_enable(dev);
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* Wait till complete */
	error = r852_dma_wait(dev);

	if (error) {
		r852_dma_done(dev, error);
		return;
	}

	/* reads land in the bounce buffer; copy them out */
	if (do_read && bounce)
		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}
225
226/*
227 * Program data lines of the nand chip to send data to it
228 */
229void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
230{
231 struct r852_device *dev = r852_get_dev(mtd);
232 uint32_t reg;
233
234 /* Don't allow any access to hardware if we suspect card removal */
235 if (dev->card_unstable)
236 return;
237
238 /* Special case for whole sector read */
239 if (len == R852_DMA_LEN && dev->dma_usable) {
240 r852_do_dma(dev, (uint8_t *)buf, 0);
241 return;
242 }
243
244 /* write DWORD chinks - faster */
245 while (len) {
246 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
247 r852_write_reg_dword(dev, R852_DATALINE, reg);
248 buf += 4;
249 len -= 4;
250
251 }
252
253 /* write rest */
254 while (len)
255 r852_write_reg(dev, R852_DATALINE, *buf++);
256}
257
/*
 * Read data lines of the nand chip to retrieve data
 */
void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(mtd);
	uint32_t reg;

	if (dev->card_unstable) {
		/* since we can't signal error here, at least, return
			predictable buffer */
		memset(buf, 0, len);
		return;
	}

	/* special case for whole sector read */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, buf, 1);
		return;
	}

	/* read in dword sized chunks, unpacking LSB first */
	while (len >= 4) {

		reg = r852_read_reg_dword(dev, R852_DATALINE);
		*buf++ = reg & 0xFF;
		*buf++ = (reg >> 8) & 0xFF;
		*buf++ = (reg >> 16) & 0xFF;
		*buf++ = (reg >> 24) & 0xFF;
		len -= 4;
	}

	/* read the rest byte by byte */
	while (len--)
		*buf++ = r852_read_reg(dev, R852_DATALINE);
}
294
295/*
296 * Read one byte from nand chip
297 */
298static uint8_t r852_read_byte(struct mtd_info *mtd)
299{
300 struct r852_device *dev = r852_get_dev(mtd);
301
302 /* Same problem as in r852_read_buf.... */
303 if (dev->card_unstable)
304 return 0;
305
306 return r852_read_reg(dev, R852_DATALINE);
307}
308
309
310/*
311 * Readback the buffer to verify it
312 */
313int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
314{
315 struct r852_device *dev = r852_get_dev(mtd);
316
317 /* We can't be sure about anything here... */
318 if (dev->card_unstable)
319 return -1;
320
321 /* This will never happen, unless you wired up a nand chip
322 with > 512 bytes page size to the reader */
323 if (len > SM_SECTOR_SIZE)
324 return 0;
325
326 r852_read_buf(mtd, dev->tmp_buffer, len);
327 return memcmp(buf, dev->tmp_buffer, len);
328}
329
/*
 * Control several chip lines & send commands
 */
void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct r852_device *dev = r852_get_dev(mtd);

	if (dev->card_unstable)
		return;

	if (ctrl & NAND_CTRL_CHANGE) {

		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
				 R852_CTL_ON | R852_CTL_CARDENABLE);

		/* ALE -> address cycle */
		if (ctrl & NAND_ALE)
			dev->ctlreg |= R852_CTL_DATA;

		/* CLE -> command cycle */
		if (ctrl & NAND_CLE)
			dev->ctlreg |= R852_CTL_COMMAND;

		/* chip select also powers the card; deselect drops
		   write access as a side effect */
		if (ctrl & NAND_NCE)
			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
		else
			dev->ctlreg &= ~R852_CTL_WRITE;

		/* when write is started, enable write access */
		if (dat == NAND_CMD_ERASE1)
			dev->ctlreg |= R852_CTL_WRITE;

		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
	   to set write mode */
	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
		dev->ctlreg |= R852_CTL_WRITE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	/* anything but NAND_CMD_NONE goes out on the data lines */
	if (dat != NAND_CMD_NONE)
		r852_write_reg(dev, R852_DATALINE, dat);
}
373
/*
 * Wait till card is ready.
 * based on nand_wait, but returns errors on DMA error
 */
int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct r852_device *dev = (struct r852_device *)chip->priv;

	unsigned long timeout;
	int status;

	/* erase takes much longer than a page program/read */
	timeout = jiffies + (chip->state == FL_ERASING ?
		msecs_to_jiffies(400) : msecs_to_jiffies(20));

	/* busy-poll the ready line until it asserts or we time out */
	while (time_before(jiffies, timeout))
		if (chip->dev_ready(mtd))
			break;

	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	status = (int)chip->read_byte(mtd);

	/* Unfortunately, no way to send detailed error status... */
	if (dev->dma_error) {
		status |= NAND_STATUS_FAIL;
		dev->dma_error = 0;
	}
	return status;
}
402
403/*
404 * Check if card is ready
405 */
406
407int r852_ready(struct mtd_info *mtd)
408{
409 struct r852_device *dev = r852_get_dev(mtd);
410 return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
411}
412
413
/*
 * Set ECC engine mode
*/
void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	struct r852_device *dev = r852_get_dev(mtd);

	if (dev->card_unstable)
		return;

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* enable ecc generation/check*/
		dev->ctlreg |= R852_CTL_ECC_ENABLE;

		/* flush ecc buffer */
		r852_write_reg(dev, R852_CTL,
			dev->ctlreg | R852_CTL_ECC_ACCESS);

		/* dummy read drains the stale ECC result */
		r852_read_reg_dword(dev, R852_DATALINE);
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
		return;

	case NAND_ECC_READSYN:
		/* disable ecc generation */
		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}
}
445
446/*
447 * Calculate ECC, only used for writes
448 */
449
450int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
451 uint8_t *ecc_code)
452{
453 struct r852_device *dev = r852_get_dev(mtd);
454 struct sm_oob *oob = (struct sm_oob *)ecc_code;
455 uint32_t ecc1, ecc2;
456
457 if (dev->card_unstable)
458 return 0;
459
460 dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
461 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
462
463 ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
464 ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
465
466 oob->ecc1[0] = (ecc1) & 0xFF;
467 oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
468 oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
469
470 oob->ecc2[0] = (ecc2) & 0xFF;
471 oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
472 oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
473
474 r852_write_reg(dev, R852_CTL, dev->ctlreg);
475 return 0;
476}
477
478/*
479 * Correct the data using ECC, hw did almost everything for us
480 */
481
482int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
483 uint8_t *read_ecc, uint8_t *calc_ecc)
484{
485 uint16_t ecc_reg;
486 uint8_t ecc_status, err_byte;
487 int i, error = 0;
488
489 struct r852_device *dev = r852_get_dev(mtd);
490
491 if (dev->card_unstable)
492 return 0;
493
494 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
495 ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
496 r852_write_reg(dev, R852_CTL, dev->ctlreg);
497
498 for (i = 0 ; i <= 1 ; i++) {
499
500 ecc_status = (ecc_reg >> 8) & 0xFF;
501
502 /* ecc uncorrectable error */
503 if (ecc_status & R852_ECC_FAIL) {
504 dbg("ecc: unrecoverable error, in half %d", i);
505 error = -1;
506 goto exit;
507 }
508
509 /* correctable error */
510 if (ecc_status & R852_ECC_CORRECTABLE) {
511
512 err_byte = ecc_reg & 0xFF;
513 dbg("ecc: recoverable error, "
514 "in half %d, byte %d, bit %d", i,
515 err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
516
517 dat[err_byte] ^=
518 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
519 error++;
520 }
521
522 dat += 256;
523 ecc_reg >>= 16;
524 }
525exit:
526 return error;
527}
528
/*
 * This is copy of nand_read_oob_std
 * nand_read_oob_syndrome assumes we can send column address - we can't
 */
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			 int page, int sndcmd)
{
	/* issue READOOB only when the caller asks for it */
	if (sndcmd) {
		chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
		sndcmd = 0;
	}
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	/* returns 0 so the core need not resend the command */
	return sndcmd;
}
543
/*
 * Start the nand engine
 */
void r852_engine_enable(struct r852_device *dev)
{
	/* the reset-vs-enable ordering depends on whether the HW is in
	   its "unknown" state */
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	/* let the engine come out of reset */
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}
560
561
/*
 * Stop the nand engine
 */
void r852_engine_disable(struct r852_device *dev)
{
	/* drop the enable bit, then hold the controller in reset */
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}
571
572/*
573 * Test if card is present
574 */
575
576void r852_card_update_present(struct r852_device *dev)
577{
578 unsigned long flags;
579 uint8_t reg;
580
581 spin_lock_irqsave(&dev->irqlock, flags);
582 reg = r852_read_reg(dev, R852_CARD_STA);
583 dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
584 spin_unlock_irqrestore(&dev->irqlock, flags);
585}
586
587/*
588 * Update card detection IRQ state according to current card state
589 * which is read in r852_card_update_present
590 */
591void r852_update_card_detect(struct r852_device *dev)
592{
593 int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
Maxim Levitskyfb45d322010-02-27 02:04:02 +0200594 dev->card_unstable = 0;
Maxim Levitsky67e054e2010-02-22 20:39:42 +0200595
596 card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
597 card_detect_reg |= R852_CARD_IRQ_GENABLE;
598
599 card_detect_reg |= dev->card_detected ?
600 R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
601
602 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
603}
604
605ssize_t r852_media_type_show(struct device *sys_dev,
606 struct device_attribute *attr, char *buf)
607{
608 struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
609 struct r852_device *dev = r852_get_dev(mtd);
610 char *data = dev->sm ? "smartmedia" : "xd";
611
612 strcpy(buf, data);
613 return strlen(data);
614}
615
616DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
617
618
/* Detect properties of card in slot */
void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return ;
	}

	/* write-protect tab state from the card status register */
	readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	reg = r852_read_reg(dev, R852_DMA_CAP);
	/* SmartMedia is signalled by R852_SMBIT on DMA-capable units */
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}
644
/*
 * Register the nand device
 * Called when the card is detected
 */
int r852_register_nand_device(struct r852_device *dev)
{
	dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);

	if (!dev->mtd)
		goto error1;

	/* double registration indicates a detection-logic bug */
	WARN_ON(dev->card_registred);

	dev->mtd->owner = THIS_MODULE;
	dev->mtd->priv = dev->chip;
	dev->mtd->dev.parent = &dev->pci_dev->dev;

	/* write-protected media is exposed as a ROM */
	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(dev->mtd))
		goto error2;

	/* attribute failure is non-fatal - just log it */
	if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
		message("can't create media type sysfs attribute");

	dev->card_registred = 1;
	return 0;
error2:
	kfree(dev->mtd);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}
682
/*
 * Unregister the card
 */
void r852_unregister_nand_device(struct r852_device *dev)
{
	/* no-op when nothing is registered */
	if (!dev->card_registred)
		return;

	device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
	/* nand_release may still perform IO, so do it before the
	   engine is stopped */
	nand_release(dev->mtd);
	r852_engine_disable(dev);
	dev->card_registred = 0;
	kfree(dev->mtd);
	dev->mtd = NULL;
}
699
/* Card state updater, runs (delayed) off the card workqueue */
void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	/* the settle delay has passed - trust the hardware again */
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registred)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	/* Update detection logic */
	r852_update_card_detect(dev);
}
725
/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;
	/* mask card-detect interrupts */
	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	/* mask DMA interrupts */
	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
					reg & ~R852_DMA_IRQ_MASK);

	/* ack anything still pending */
	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}
740
/* Interrupt handler: card insert/remove detection and the two-stage
   DMA state machine */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	/* NOTE(review): dma_status is uint8_t while the status register
	   is read as a dword - presumably all interrupt bits live in the
	   low byte; confirm against R852_DMA_IRQ_* definitions. */
	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* We can receive shared interrupt while pci is suspended
	   in that case reads will return 0xFFFFFFFF.... */
	if (dev->insuspend)
		goto out;

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts if we wait for card
			to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("recieved dma error IRQ");
			r852_dma_done(dev, -EIO);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3)
			r852_dma_done(dev, 0);
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}
841
842int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
843{
844 int error;
845 struct nand_chip *chip;
846 struct r852_device *dev;
847
848 /* pci initialization */
849 error = pci_enable_device(pci_dev);
850
851 if (error)
852 goto error1;
853
854 pci_set_master(pci_dev);
855
Maxim Levitsky133fa8c2010-02-26 22:08:40 +0200856 error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
Maxim Levitsky67e054e2010-02-22 20:39:42 +0200857 if (error)
858 goto error2;
859
860 error = pci_request_regions(pci_dev, DRV_NAME);
861
862 if (error)
863 goto error3;
864
865 error = -ENOMEM;
866
867 /* init nand chip, but register it only on card insert */
868 chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
869
870 if (!chip)
871 goto error4;
872
873 /* commands */
874 chip->cmd_ctrl = r852_cmdctl;
875 chip->waitfunc = r852_wait;
876 chip->dev_ready = r852_ready;
877
878 /* I/O */
879 chip->read_byte = r852_read_byte;
880 chip->read_buf = r852_read_buf;
881 chip->write_buf = r852_write_buf;
882 chip->verify_buf = r852_verify_buf;
883
884 /* ecc */
885 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
886 chip->ecc.size = R852_DMA_LEN;
887 chip->ecc.bytes = SM_OOB_SIZE;
888 chip->ecc.hwctl = r852_ecc_hwctl;
889 chip->ecc.calculate = r852_ecc_calculate;
890 chip->ecc.correct = r852_ecc_correct;
891
892 /* TODO: hack */
893 chip->ecc.read_oob = r852_read_oob;
894
895 /* init our device structure */
896 dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
897
898 if (!dev)
899 goto error5;
900
901 chip->priv = dev;
902 dev->chip = chip;
903 dev->pci_dev = pci_dev;
904 pci_set_drvdata(pci_dev, dev);
905
906 dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
907 &dev->phys_bounce_buffer);
908
909 if (!dev->bounce_buffer)
910 goto error6;
911
912
913 error = -ENODEV;
914 dev->mmio = pci_ioremap_bar(pci_dev, 0);
915
916 if (!dev->mmio)
917 goto error7;
918
919 error = -ENOMEM;
920 dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
921
922 if (!dev->tmp_buffer)
923 goto error8;
924
925 init_completion(&dev->dma_done);
926
927 dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
928
929 if (!dev->card_workqueue)
930 goto error9;
931
932 INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
933
934 /* shutdown everything - precation */
935 r852_engine_disable(dev);
936 r852_disable_irqs(dev);
937
938 r852_dma_test(dev);
939
940 /*register irq handler*/
941 error = -ENODEV;
942 if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
943 DRV_NAME, dev))
944 goto error10;
945
946 dev->irq = pci_dev->irq;
947 spin_lock_init(&dev->irqlock);
948
949 /* kick initial present test */
950 dev->card_detected = 0;
951 r852_card_update_present(dev);
952 queue_delayed_work(dev->card_workqueue,
953 &dev->card_detect_work, 0);
954
955
956 printk(KERN_NOTICE DRV_NAME ": driver loaded succesfully\n");
957 return 0;
958
959error10:
960 destroy_workqueue(dev->card_workqueue);
961error9:
962 kfree(dev->tmp_buffer);
963error8:
964 pci_iounmap(pci_dev, dev->mmio);
965error7:
966 pci_free_consistent(pci_dev, R852_DMA_LEN,
967 dev->bounce_buffer, dev->phys_bounce_buffer);
968error6:
969 kfree(dev);
970error5:
971 kfree(chip);
972error4:
973 pci_release_regions(pci_dev);
974error3:
975error2:
976 pci_disable_device(pci_dev);
977error1:
978 return error;
979}
980
/* PCI remove: undo everything r852_probe set up, in reverse order */
void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
		we are going to unregister the device anyway*/
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	pci_free_consistent(pci_dev, R852_DMA_LEN,
		dev->bounce_buffer, dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}
1011
/* System shutdown/reboot hook: quiesce the controller */
void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	/* make sure no handler is still running before powering down */
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}
1021
/* Power-management suspend hook */
int r852_suspend(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
	unsigned long flags;

	/* refuse to suspend while the card is selected/active */
	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	spin_lock_irqsave(&dev->irqlock, flags);
	dev->insuspend = 1;
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* At that point, even if interrupt handler is running, it will quit */
	/* So wait for this to happen explicitly */
	synchronize_irq(dev->irq);

	/* If card was pulled off just during the suspend, which is very
	   unlikely, we will remove it on resume, it too late now
	   anyway... */
	dev->card_unstable = 0;

	pci_save_state(to_pci_dev(device));
	return pci_prepare_to_sleep(to_pci_dev(device));
}
1053
1054int r852_resume(struct device *device)
1055{
1056 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1057 unsigned long flags;
1058
1059 /* Turn on the hardware */
1060 pci_back_from_sleep(to_pci_dev(device));
1061 pci_restore_state(to_pci_dev(device));
1062
1063 r852_disable_irqs(dev);
1064 r852_card_update_present(dev);
1065 r852_engine_disable(dev);
1066
1067
1068 /* Now its safe for IRQ to run */
1069 spin_lock_irqsave(&dev->irqlock, flags);
1070 dev->insuspend = 0;
1071 spin_unlock_irqrestore(&dev->irqlock, flags);
1072
1073
1074 /* If card status changed, just do the work */
1075 if (dev->card_detected != dev->card_registred) {
1076 dbg("card was %s during low power state",
1077 dev->card_detected ? "added" : "removed");
1078
1079 queue_delayed_work(dev->card_workqueue,
1080 &dev->card_detect_work, 1000);
1081 return 0;
1082 }
1083
1084 /* Otherwise, initialize the card */
1085 if (dev->card_registred) {
1086 r852_engine_enable(dev);
1087 dev->chip->select_chip(dev->mtd, 0);
1088 dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
1089 dev->chip->select_chip(dev->mtd, -1);
1090 }
1091
1092 /* Program card detection IRQ */
1093 r852_update_card_detect(dev);
1094 return 0;
1095}
1096
1097static const struct pci_device_id r852_pci_id_tbl[] = {
1098
Maxim Levitskyd4080cb2010-02-26 23:10:32 +02001099 { PCI_VDEVICE(RICOH, 0x0852), },
Maxim Levitsky67e054e2010-02-22 20:39:42 +02001100 { },
1101};
1102
1103MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1104
1105SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1106
1107
1108static struct pci_driver r852_pci_driver = {
1109 .name = DRV_NAME,
1110 .id_table = r852_pci_id_tbl,
1111 .probe = r852_probe,
1112 .remove = r852_remove,
1113 .shutdown = r852_shutdown,
1114 .driver.pm = &r852_pm_ops,
1115};
1116
1117static __init int r852_module_init(void)
1118{
1119 return pci_register_driver(&r852_pci_driver);
1120}
1121
1122static void __exit r852_module_exit(void)
1123{
1124 pci_unregister_driver(&r852_pci_driver);
1125}
1126
module_init(r852_module_init);
module_exit(r852_module_exit);

/* module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");