/*
 * drivers/block/mg_disk.c
 *
 * Support for the mGine m[g]flash IO mode.
 * Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>

#define MG_RES_SEC	(CONFIG_MG_DISK_RES << 1)

static void mg_request(struct request_queue *);
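/*
 * Dump the status register (and, when the error bit is set, the error
 * register) to the kernel log, using the disk name of the request at
 * the head of the queue when one is available.
 */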
static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;
	struct request *req;

	if (host->breq) {
		req = elv_next_request(host->breq);
		if (req)
			name = req->rq_disk->disk_name;
	}

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & MG_REG_STATUS_BIT_BUSY)
		printk("Busy ");
	if (stat & MG_REG_STATUS_BIT_READY)
		printk("DriveReady ");
	if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
		printk("WriteFault ");
	if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
		printk("SeekComplete ");
	if (stat & MG_REG_STATUS_BIT_DATA_REQ)
		printk("DataRequest ");
	if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
		printk("CorrectedError ");
	if (stat & MG_REG_STATUS_BIT_ERROR)
		printk("Error ");
	printk("}\n");
	if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
		host->error = 0;
	} else {
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & MG_REG_ERR_BBK)
			printk("BadSector ");
		if (host->error & MG_REG_ERR_UNC)
			printk("UncorrectableError ");
		if (host->error & MG_REG_ERR_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & MG_REG_ERR_ABRT)
			printk("DriveStatusError ");
		if (host->error & MG_REG_ERR_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error &
				(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
				 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
			if (host->breq) {
				req = elv_next_request(host->breq);
				if (req)
					printk(", sector=%u", (u32)req->sector);
			}
		}
		printk("\n");
	}
}
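/*
 * Poll the status register until the bits in @expect are set, an error
 * is reported, or @msec milliseconds have elapsed.  Returns MG_ERR_NONE
 * on success or an MG_ERR_* code on failure.
 */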
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & MG_REG_STATUS_BIT_BUSY) {
			if (expect == MG_REG_STATUS_BIT_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & MG_REG_STATUS_BIT_ERROR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == MG_REG_STATUS_BIT_DATA_REQ)
				if (status & MG_REG_STATUS_BIT_DATA_REQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}
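/*
 * Wait for the RSTOUT GPIO to go high, i.e. for the mflash power-on
 * reset sequence to finish.
 */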
static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long expire;

	expire = jiffies + msecs_to_jiffies(msec);
	while (time_before(jiffies, expire)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}
static void mg_unexpected_intr(struct mg_host *host)
{
	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	mg_dump_status("mg_unexpected_intr", status, host);
}
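/*
 * Interrupt handler: run the deferred handler installed by mg_out()
 * (mg_read_intr or mg_write_intr), or report an unexpected interrupt.
 */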
static irqreturn_t mg_irq(int irq, void *dev_id)
{
	struct mg_host *host = dev_id;
	void (*handler)(struct mg_host *) = host->mg_do_intr;

	spin_lock(&host->lock);

	host->mg_do_intr = NULL;
	del_timer(&host->timer);
	if (!handler)
		handler = mg_unexpected_intr;
	handler(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}
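/*
 * Issue the IDENTIFY command, read the ID block from the device buffer
 * and derive the disk geometry, excluding the configured reserved
 * sectors from the usable capacity.
 */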
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return err;
}
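/*
 * Reset the mflash: toggle the hardware reset pin, then issue a soft
 * reset through the control register and check the resulting status.
 */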
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(MG_REG_CTRL_RESET |
			(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			 MG_REG_CTRL_INTR_ENABLE),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			MG_REG_CTRL_INTR_ENABLE,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}
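/*
 * Read/write error path: fail the current request once it has
 * accumulated too many errors or has timed out.
 */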
static void mg_bad_rw_intr(struct mg_host *host)
{
	struct request *req = elv_next_request(host->breq);
	if (req != NULL)
		if (++req->errors >= MG_MAX_ERRORS ||
				host->error == MG_ERR_TIMEOUT)
			end_request(req, 0);
}
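/*
 * Program the task-file registers (sector count, LBA, command) for a
 * transfer.  In interrupt mode, @intr_addr is installed as the handler
 * that mg_irq() will invoke when the device raises its interrupt.
 */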
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;
	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}
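/* Polled (PIO) read of a whole request, one sector at a time. */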
static void mg_read(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
			MG_ERR_NONE)
		mg_bad_rw_intr(host);

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);

	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
				MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			*(u16 *)req->buffer =
				inw((unsigned long)host->dev_base +
						MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}

		req->sector++;
		req->errors = 0;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}
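/* Polled (PIO) write of a whole request, one sector at a time. */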
static void mg_write(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);
	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
				MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			outw(*(u16 *)req->buffer,
					(unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}
		req->sector++;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}
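/*
 * Interrupt-mode read handler: transfer one sector from the device
 * buffer, complete the segment when it is finished and re-arm the
 * handler while sectors remain.
 */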
static void mg_read_intr(struct mg_host *host)
{
	u32 i;
	struct request *req;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & MG_REG_STATUS_BIT_DATA_REQ)
			goto ok_to_read;
	} while (0);
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
		*(u16 *)req->buffer =
			inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
					(i << 1));
		req->buffer += 2;
	}

	/* manipulate request */
	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
			req->sector, req->nr_sectors - 1, req->buffer);

	req->sector++;
	req->errors = 0;
	i = --req->nr_sectors;
	--req->current_nr_sectors;

	/* let know if current segment done */
	if (req->current_nr_sectors <= 0)
		end_request(req, 1);

	/* set handler if read remains */
	if (i > 0) {
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	/* goto next request */
	if (!i)
		mg_request(host->breq);
}
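/*
 * Interrupt-mode write handler: account for the sector just written,
 * push the next sector into the device buffer and re-arm the handler
 * while sectors remain.
 */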
static void mg_write_intr(struct mg_host *host)
{
	u32 i, j;
	u16 *buff;
	struct request *req;

	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
			goto ok_to_write;
	} while (0);
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	/* manipulate request */
	req->sector++;
	i = --req->nr_sectors;
	--req->current_nr_sectors;
	req->buffer += MG_SECTOR_SIZE;

	/* let know if current segment or all done */
	if (!i || (req->bio && req->current_nr_sectors <= 0))
		end_request(req, 1);

	/* write 1 sector and set handler if remains */
	if (i > 0) {
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
				req->sector, req->nr_sectors, req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!i)
		mg_request(host->breq);
}
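/*
 * Timer callback: a transfer did not complete within the allotted
 * time, so fail the request and restart the queue.
 */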
void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;
	struct request *req;

	spin_lock_irq(&host->lock);

	req = elv_next_request(host->breq);
	if (!req)
		goto out_unlock;

	host->mg_do_intr = NULL;

	name = req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

	mg_request(host->breq);
out_unlock:
	spin_unlock_irq(&host->lock);
}
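/* request_fn used when the driver is configured for polling mode */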
static void mg_request_poll(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;

	while ((req = elv_next_request(q)) != NULL) {
		host = req->rq_disk->private_data;
		if (blk_fs_request(req)) {
			switch (rq_data_dir(req)) {
			case READ:
				mg_read(req);
				break;
			case WRITE:
				mg_write(req);
				break;
			default:
				printk(KERN_WARNING "%s:%d unknown command\n",
						__func__, __LINE__);
				end_request(req, 0);
				break;
			}
		}
	}
}
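/*
 * Start an interrupt-driven transfer: reads are handed off to
 * mg_read_intr(); writes prime the first sector here and complete in
 * mg_write_intr().
 */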
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	default:
		printk(KERN_WARNING "%s:%d unknown command\n",
				__func__, __LINE__);
		end_request(req, 0);
		break;
	}
	return MG_ERR_NONE;
}
/* This function is also called from IRQ context */
static void mg_request(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;
	u32 sect_num, sect_cnt;

	while (1) {
		req = elv_next_request(q);
		if (!req)
			return;

		host = req->rq_disk->private_data;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = req->sector;
		/* deal whole segments */
		sect_cnt = req->nr_sectors;

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			end_request(req, 0);
			continue;
		}

		if (!blk_fs_request(req))
			return;

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}
static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mg_host *host = bdev->bd_disk->private_data;

	geo->cylinders = (unsigned short)host->cyls;
	geo->heads = (unsigned char)host->heads;
	geo->sectors = (unsigned char)host->sectors;
	return 0;
}

static struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};
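/* platform suspend hook: put the mflash into sleep mode */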
static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		if (!prv_data->use_polling)
			outb(MG_REG_CTRL_INTR_ENABLE,
					(unsigned long)host->dev_base +
					MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}
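/* platform resume hook: wake the mflash back up */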
static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakeup */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return 0;
}
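/*
 * Probe: map the register window, request the reset GPIOs and the IRQ,
 * identify the disk and register it as a block device.
 */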
static int mg_probe(struct platform_device *plat_dev)
{
	struct mg_host *host;
	struct resource *rsc;
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	int err = 0;

	if (!prv_data) {
		printk(KERN_ERR "%s:%d fail (no driver_data)\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err;
	}

	/* alloc mg_host */
	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, rsc->end + 1);
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK))
		goto probe_err_3a;

	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
					__func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* If POR seq. not yet finished, wait */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
					__func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
					__func__, __LINE__, err);
			goto probe_err_3b;
		}
	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
				__func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
				__func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
				__func__, __LINE__);
		goto probe_err_5;
	}
	/* mflash is a random-access device; use the noop elevator */
	elevator_exit(host->breq->elevator);
	err = elevator_init(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
				__func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);

	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}
static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}
static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};
/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");