blob: 5b5528d8de60c6e44231ed0f4b991694f7268b85 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
Pierre Ossman979ce722008-06-29 12:19:47 +02005 * Copyright 2005-2008 Pierre Ossman
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/kernel.h>
25#include <linux/fs.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090026#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/errno.h>
28#include <linux/hdreg.h>
29#include <linux/kdev_t.h>
30#include <linux/blkdev.h>
Arjan van de Vena621aae2006-01-12 18:43:35 +000031#include <linux/mutex.h>
Pierre Ossmanec5a19d2006-10-06 00:44:03 -070032#include <linux/scatterlist.h>
Pierre Ossmana7bbb572008-09-06 10:57:57 +020033#include <linux/string_helpers.h>
John Calixtocb87ea22011-04-26 18:56:29 -040034#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
John Calixtocb87ea22011-04-26 18:56:29 -040038#include <linux/mmc/ioctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/mmc/card.h>
Pierre Ossman385e3222006-06-18 14:34:37 +020040#include <linux/mmc/host.h>
Pierre Ossmanda7fbe52006-12-24 22:46:55 +010041#include <linux/mmc/mmc.h>
42#include <linux/mmc/sd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <asm/system.h>
45#include <asm/uaccess.h>
46
Pierre Ossman98ac2162006-12-23 20:03:02 +010047#include "queue.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
MODULE_ALIAS("mmc:block");

/* Module parameters live under the "mmcblk." prefix regardless of how the
 * driver is built. */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

/* iNAND (SanDisk) vendor-specific CMD38 arguments, written to EXT_CSD
 * byte 113 before an erase/trim on quirky cards. */
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

/* Serializes block device open/release against each other. */
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
Linus Torvalds1da177e2005-04-16 15:20:36 -070080/*
81 * There is one mmc_blk_data per slot.
82 */
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;		/* protects request completion (__blk_end_request) */
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;		/* partition mmc_blk_data list head/node */

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;		/* refcount, guarded by open_lock */
	unsigned int	read_only;
	unsigned int	part_type;	/* EXT_CSD partition access bits for this device */
	unsigned int	name_idx;	/* index into name_use bitmap */

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;	/* sysfs "force_ro" attribute */
};
106
/* Serializes mmc_blk_get()/mmc_blk_put() reference counting. */
static DEFINE_MUTEX(open_lock);

/* Outcome of a completed block request, as classified by mmc_blk_err_check(). */
enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,	/* fewer bytes transferred than requested */
	MMC_BLK_RETRY,		/* transient failure, retry the request */
	MMC_BLK_RETRY_SINGLE,	/* retry a failed multi-block read one sector at a time */
	MMC_BLK_DATA_ERR,
	MMC_BLK_CMD_ERR,
	MMC_BLK_ABORT,		/* unrecoverable, give up on the request */
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
123{
124 struct mmc_blk_data *md;
125
Arjan van de Vena621aae2006-01-12 18:43:35 +0000126 mutex_lock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127 md = disk->private_data;
128 if (md && md->usage == 0)
129 md = NULL;
130 if (md)
131 md->usage++;
Arjan van de Vena621aae2006-01-12 18:43:35 +0000132 mutex_unlock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133
134 return md;
135}
136
Andrei Warkentin371a6892011-04-11 18:10:25 -0500137static inline int mmc_get_devidx(struct gendisk *disk)
138{
Colin Crossfa746fa2010-09-03 12:41:21 -0700139 int devidx = disk->first_minor / perdev_minors;
Andrei Warkentin371a6892011-04-11 18:10:25 -0500140 return devidx;
141}
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143static void mmc_blk_put(struct mmc_blk_data *md)
144{
Arjan van de Vena621aae2006-01-12 18:43:35 +0000145 mutex_lock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 md->usage--;
147 if (md->usage == 0) {
Andrei Warkentin371a6892011-04-11 18:10:25 -0500148 int devidx = mmc_get_devidx(md->disk);
Adrian Hunter5fa83ce2010-01-08 14:43:00 -0800149 blk_cleanup_queue(md->queue.queue);
150
David Woodhouse1dff3142007-11-21 18:45:12 +0100151 __clear_bit(devidx, dev_use);
152
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 put_disk(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 kfree(md);
155 }
Arjan van de Vena621aae2006-01-12 18:43:35 +0000156 mutex_unlock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157}
158
Andrei Warkentin371a6892011-04-11 18:10:25 -0500159static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
160 char *buf)
161{
162 int ret;
163 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
164
165 ret = snprintf(buf, PAGE_SIZE, "%d",
166 get_disk_ro(dev_to_disk(dev)) ^
167 md->read_only);
168 mmc_blk_put(md);
169 return ret;
170}
171
172static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
173 const char *buf, size_t count)
174{
175 int ret;
176 char *end;
177 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
178 unsigned long set = simple_strtoul(buf, &end, 0);
179 if (end == buf) {
180 ret = -EINVAL;
181 goto out;
182 }
183
184 set_disk_ro(dev_to_disk(dev), set || md->read_only);
185 ret = count;
186out:
187 mmc_blk_put(md);
188 return ret;
189}
190
Al Viroa5a15612008-03-02 10:33:30 -0500191static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192{
Al Viroa5a15612008-03-02 10:33:30 -0500193 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194 int ret = -ENXIO;
195
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200196 mutex_lock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 if (md) {
198 if (md->usage == 2)
Al Viroa5a15612008-03-02 10:33:30 -0500199 check_disk_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 ret = 0;
Pierre Ossmana00fc092005-09-06 15:18:52 -0700201
Al Viroa5a15612008-03-02 10:33:30 -0500202 if ((mode & FMODE_WRITE) && md->read_only) {
Andrew Morton70bb0892008-09-05 14:00:24 -0700203 mmc_blk_put(md);
Pierre Ossmana00fc092005-09-06 15:18:52 -0700204 ret = -EROFS;
Andrew Morton70bb0892008-09-05 14:00:24 -0700205 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 }
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200207 mutex_unlock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
209 return ret;
210}
211
/* Block device release: drop the reference taken in mmc_blk_open(). */
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}
221
222static int
Christoph Hellwiga885c8c2006-01-08 01:02:50 -0800223mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224{
Christoph Hellwiga885c8c2006-01-08 01:02:50 -0800225 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
226 geo->heads = 4;
227 geo->sectors = 16;
228 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229}
230
/* Kernel-side copy of a user-space MMC_IOC_CMD request. */
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;	/* command descriptor copied from user space */
	unsigned char *buf;	/* bounce buffer for the data phase */
	u64 buf_bytes;		/* ic.blksz * ic.blocks == size of @buf */
};
236
237static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
238 struct mmc_ioc_cmd __user *user)
239{
240 struct mmc_blk_ioc_data *idata;
241 int err;
242
243 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
244 if (!idata) {
245 err = -ENOMEM;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400246 goto out;
John Calixtocb87ea22011-04-26 18:56:29 -0400247 }
248
249 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
250 err = -EFAULT;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400251 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400252 }
253
254 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
255 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
256 err = -EOVERFLOW;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400257 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400258 }
259
260 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
261 if (!idata->buf) {
262 err = -ENOMEM;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400263 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400264 }
265
266 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
267 idata->ic.data_ptr, idata->buf_bytes)) {
268 err = -EFAULT;
269 goto copy_err;
270 }
271
272 return idata;
273
274copy_err:
275 kfree(idata->buf);
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400276idata_err:
John Calixtocb87ea22011-04-26 18:56:29 -0400277 kfree(idata);
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400278out:
John Calixtocb87ea22011-04-26 18:56:29 -0400279 return ERR_PTR(err);
John Calixtocb87ea22011-04-26 18:56:29 -0400280}
281
282static int mmc_blk_ioctl_cmd(struct block_device *bdev,
283 struct mmc_ioc_cmd __user *ic_ptr)
284{
285 struct mmc_blk_ioc_data *idata;
286 struct mmc_blk_data *md;
287 struct mmc_card *card;
288 struct mmc_command cmd = {0};
289 struct mmc_data data = {0};
290 struct mmc_request mrq = {0};
291 struct scatterlist sg;
292 int err;
293
294 /*
295 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
296 * whole block device, not on a partition. This prevents overspray
297 * between sibling partitions.
298 */
299 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
300 return -EPERM;
301
302 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
303 if (IS_ERR(idata))
304 return PTR_ERR(idata);
305
306 cmd.opcode = idata->ic.opcode;
307 cmd.arg = idata->ic.arg;
308 cmd.flags = idata->ic.flags;
309
310 data.sg = &sg;
311 data.sg_len = 1;
312 data.blksz = idata->ic.blksz;
313 data.blocks = idata->ic.blocks;
314
315 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
316
317 if (idata->ic.write_flag)
318 data.flags = MMC_DATA_WRITE;
319 else
320 data.flags = MMC_DATA_READ;
321
322 mrq.cmd = &cmd;
323 mrq.data = &data;
324
325 md = mmc_blk_get(bdev->bd_disk);
326 if (!md) {
327 err = -EINVAL;
328 goto cmd_done;
329 }
330
331 card = md->queue.card;
332 if (IS_ERR(card)) {
333 err = PTR_ERR(card);
334 goto cmd_done;
335 }
336
337 mmc_claim_host(card->host);
338
339 if (idata->ic.is_acmd) {
340 err = mmc_app_cmd(card->host, card);
341 if (err)
342 goto cmd_rel_host;
343 }
344
345 /* data.flags must already be set before doing this. */
346 mmc_set_data_timeout(&data, card);
347 /* Allow overriding the timeout_ns for empirical tuning. */
348 if (idata->ic.data_timeout_ns)
349 data.timeout_ns = idata->ic.data_timeout_ns;
350
351 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
352 /*
353 * Pretend this is a data transfer and rely on the host driver
354 * to compute timeout. When all host drivers support
355 * cmd.cmd_timeout for R1B, this can be changed to:
356 *
357 * mrq.data = NULL;
358 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
359 */
360 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
361 }
362
363 mmc_wait_for_req(card->host, &mrq);
364
365 if (cmd.error) {
366 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
367 __func__, cmd.error);
368 err = cmd.error;
369 goto cmd_rel_host;
370 }
371 if (data.error) {
372 dev_err(mmc_dev(card->host), "%s: data error %d\n",
373 __func__, data.error);
374 err = data.error;
375 goto cmd_rel_host;
376 }
377
378 /*
379 * According to the SD specs, some commands require a delay after
380 * issuing the command.
381 */
382 if (idata->ic.postsleep_min_us)
383 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
384
385 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
386 err = -EFAULT;
387 goto cmd_rel_host;
388 }
389
390 if (!idata->ic.write_flag) {
391 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
392 idata->buf, idata->buf_bytes)) {
393 err = -EFAULT;
394 goto cmd_rel_host;
395 }
396 }
397
398cmd_rel_host:
399 mmc_release_host(card->host);
400
401cmd_done:
402 mmc_blk_put(md);
403 kfree(idata->buf);
404 kfree(idata);
405 return err;
406}
407
408static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
409 unsigned int cmd, unsigned long arg)
410{
411 int ret = -EINVAL;
412 if (cmd == MMC_IOC_CMD)
413 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
414 return ret;
415}
416
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: translate the pointer, reuse mmc_blk_ioctl(). */
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
424
/* Block device operations for mmcblk disks. */
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
435
Andrei Warkentin371a6892011-04-11 18:10:25 -0500436static inline int mmc_blk_part_switch(struct mmc_card *card,
437 struct mmc_blk_data *md)
438{
439 int ret;
440 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
441 if (main_md->part_curr == md->part_type)
442 return 0;
443
444 if (mmc_card_mmc(card)) {
445 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
446 card->ext_csd.part_config |= md->part_type;
447
448 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
449 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
450 card->ext_csd.part_time);
451 if (ret)
452 return ret;
453}
454
455 main_md->part_curr = md->part_type;
456 return 0;
457}
458
/*
 * Ask an SD card how many blocks of the last write were committed, via
 * ACMD22 (SEND_NUM_WR_BLKS). Returns the block count, or (u32)-1 on any
 * failure. The command/data setup order below is deliberate: the APP_CMD
 * is sent to the card before any allocation, so statement order matters.
 */
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;		/* card returns the count big-endian */

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	/* CMD55: announce that the next command is application-specific. */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	/* In SD mode the card must acknowledge APP_CMD in its R1 status. */
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Generous timeout: 100x the card's stated access time... */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	/* ...but capped at 100ms overall. */
	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	/* The response payload is a single 4-byte block. */
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	/* DMA-able bounce buffer; stack memory must not be used for DMA. */
	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
525
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100526static int send_stop(struct mmc_card *card, u32 *status)
527{
528 struct mmc_command cmd = {0};
529 int err;
530
531 cmd.opcode = MMC_STOP_TRANSMISSION;
532 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
533 err = mmc_wait_for_cmd(card->host, &cmd, 5);
534 if (err == 0)
535 *status = cmd.resp[0];
536 return err;
537}
538
Russell King - ARM Linux6be918e2011-06-20 20:10:08 +0100539static int get_card_status(struct mmc_card *card, u32 *status, int retries)
Adrian Hunter504f1912008-10-16 12:55:25 +0300540{
Chris Ball1278dba2011-04-13 23:40:30 -0400541 struct mmc_command cmd = {0};
Adrian Hunter504f1912008-10-16 12:55:25 +0300542 int err;
543
Adrian Hunter504f1912008-10-16 12:55:25 +0300544 cmd.opcode = MMC_SEND_STATUS;
545 if (!mmc_host_is_spi(card->host))
546 cmd.arg = card->rca << 16;
547 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
Russell King - ARM Linux6be918e2011-06-20 20:10:08 +0100548 err = mmc_wait_for_cmd(card->host, &cmd, retries);
549 if (err == 0)
550 *status = cmd.resp[0];
551 return err;
Adrian Hunter504f1912008-10-16 12:55:25 +0300552}
553
/* Verdicts returned by mmc_blk_cmd_error()/mmc_blk_cmd_recovery(). */
#define ERR_NOMEDIUM	3	/* card is gone, fail the request */
#define ERR_RETRY	2	/* transient, retry the r/w command */
#define ERR_ABORT	1	/* unrecoverable, give up */
#define ERR_CONTINUE	0	/* proceed with normal error handling */
558
559static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
560 bool status_valid, u32 status)
561{
562 switch (error) {
563 case -EILSEQ:
564 /* response crc error, retry the r/w cmd */
565 pr_err("%s: %s sending %s command, card status %#x\n",
566 req->rq_disk->disk_name, "response CRC error",
567 name, status);
568 return ERR_RETRY;
569
570 case -ETIMEDOUT:
571 pr_err("%s: %s sending %s command, card status %#x\n",
572 req->rq_disk->disk_name, "timed out", name, status);
573
574 /* If the status cmd initially failed, retry the r/w cmd */
Ken Sumrallf4104732011-10-25 18:16:58 -0700575 if (!status_valid) {
576 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100577 return ERR_RETRY;
Ken Sumrallf4104732011-10-25 18:16:58 -0700578 }
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100579 /*
580 * If it was a r/w cmd crc error, or illegal command
581 * (eg, issued in wrong state) then retry - we should
582 * have corrected the state problem above.
583 */
Ken Sumrallf4104732011-10-25 18:16:58 -0700584 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
585 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100586 return ERR_RETRY;
Ken Sumrallf4104732011-10-25 18:16:58 -0700587 }
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100588
589 /* Otherwise abort the command */
Ken Sumrallf4104732011-10-25 18:16:58 -0700590 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +0100591 return ERR_ABORT;
592
593 default:
594 /* We don't understand the error code the driver gave us */
595 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
596 req->rq_disk->disk_name, error, status);
597 return ERR_ABORT;
598 }
599}
600
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 *
 * Returns one of the ERR_* codes defined above.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	/* A removed card cannot recover; fail fast. */
	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
					 prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
					 prev_cmd_status_valid, status);

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
696
Adrian Hunterbd788c92010-08-11 14:17:47 -0700697static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
698{
699 struct mmc_blk_data *md = mq->data;
700 struct mmc_card *card = md->queue.card;
701 unsigned int from, nr, arg;
702 int err = 0;
703
Adrian Hunterbd788c92010-08-11 14:17:47 -0700704 if (!mmc_can_erase(card)) {
705 err = -EOPNOTSUPP;
706 goto out;
707 }
708
709 from = blk_rq_pos(req);
710 nr = blk_rq_sectors(req);
711
712 if (mmc_can_trim(card))
713 arg = MMC_TRIM_ARG;
714 else
715 arg = MMC_ERASE_ARG;
716
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -0500717 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
718 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
719 INAND_CMD38_ARG_EXT_CSD,
720 arg == MMC_TRIM_ARG ?
721 INAND_CMD38_ARG_TRIM :
722 INAND_CMD38_ARG_ERASE,
723 0);
724 if (err)
725 goto out;
726 }
Adrian Hunterbd788c92010-08-11 14:17:47 -0700727 err = mmc_erase(card, from, nr, arg);
728out:
729 spin_lock_irq(&md->lock);
730 __blk_end_request(req, err, blk_rq_bytes(req));
731 spin_unlock_irq(&md->lock);
732
Adrian Hunterbd788c92010-08-11 14:17:47 -0700733 return err ? 0 : 1;
734}
735
Adrian Hunter49804542010-08-11 14:17:50 -0700736static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
737 struct request *req)
738{
739 struct mmc_blk_data *md = mq->data;
740 struct mmc_card *card = md->queue.card;
741 unsigned int from, nr, arg;
742 int err = 0;
743
Adrian Hunter49804542010-08-11 14:17:50 -0700744 if (!mmc_can_secure_erase_trim(card)) {
745 err = -EOPNOTSUPP;
746 goto out;
747 }
748
749 from = blk_rq_pos(req);
750 nr = blk_rq_sectors(req);
751
752 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
753 arg = MMC_SECURE_TRIM1_ARG;
754 else
755 arg = MMC_SECURE_ERASE_ARG;
756
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -0500757 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
758 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
759 INAND_CMD38_ARG_EXT_CSD,
760 arg == MMC_SECURE_TRIM1_ARG ?
761 INAND_CMD38_ARG_SECTRIM1 :
762 INAND_CMD38_ARG_SECERASE,
763 0);
764 if (err)
765 goto out;
766 }
Adrian Hunter49804542010-08-11 14:17:50 -0700767 err = mmc_erase(card, from, nr, arg);
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -0500768 if (!err && arg == MMC_SECURE_TRIM1_ARG) {
769 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
770 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
771 INAND_CMD38_ARG_EXT_CSD,
772 INAND_CMD38_ARG_SECTRIM2,
773 0);
774 if (err)
775 goto out;
776 }
Adrian Hunter49804542010-08-11 14:17:50 -0700777 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -0500778 }
Adrian Hunter49804542010-08-11 14:17:50 -0700779out:
780 spin_lock_irq(&md->lock);
781 __blk_end_request(req, err, blk_rq_bytes(req));
782 spin_unlock_irq(&md->lock);
783
Adrian Hunter49804542010-08-11 14:17:50 -0700784 return err ? 0 : 1;
785}
786
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500787static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
788{
789 struct mmc_blk_data *md = mq->data;
790
791 /*
792 * No-op, only service this because we need REQ_FUA for reliable
793 * writes.
794 */
795 spin_lock_irq(&md->lock);
796 __blk_end_request_all(req, 0);
797 spin_unlock_irq(&md->lock);
798
799 return 1;
800}
801
802/*
803 * Reformat current write as a reliable write, supporting
804 * both legacy and the enhanced reliable write MMC cards.
805 * In each transfer we'll handle only as much as a single
806 * reliable write can handle, thus finish the request in
807 * partial completions.
808 */
Andrei Warkentind0c97cf2011-05-23 15:06:36 -0500809static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
810 struct mmc_card *card,
811 struct request *req)
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500812{
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500813 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
814 /* Legacy mode imposes restrictions on transfers. */
815 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
816 brq->data.blocks = 1;
817
818 if (brq->data.blocks > card->ext_csd.rel_sectors)
819 brq->data.blocks = card->ext_csd.rel_sectors;
820 else if (brq->data.blocks < card->ext_csd.rel_sectors)
821 brq->data.blocks = 1;
822 }
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500823}
824
/* R1 status bits that indicate the r/w command itself failed. */
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
832
Per Forlind737c892011-07-01 18:55:30 +0200833int mmc_blk_err_check(struct mmc_blk_request *brq,
834 struct request *req,
835 struct mmc_card *card,
836 struct mmc_blk_data *md)
837{
838 int ret = MMC_BLK_SUCCESS;
839
840 /*
841 * sbc.error indicates a problem with the set block count
842 * command. No data will have been transferred.
843 *
844 * cmd.error indicates a problem with the r/w command. No
845 * data will have been transferred.
846 *
847 * stop.error indicates a problem with the stop command. Data
848 * may have been transferred, or may still be transferring.
849 */
850 if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
851 switch (mmc_blk_cmd_recovery(card, req, brq)) {
852 case ERR_RETRY:
853 return MMC_BLK_RETRY;
854 case ERR_ABORT:
855 case ERR_NOMEDIUM:
856 return MMC_BLK_ABORT;
857 case ERR_CONTINUE:
858 break;
859 }
860 }
861
862 /*
863 * Check for errors relating to the execution of the
864 * initial command - such as address errors. No data
865 * has been transferred.
866 */
867 if (brq->cmd.resp[0] & CMD_ERRORS) {
868 pr_err("%s: r/w command failed, status = %#x\n",
869 req->rq_disk->disk_name, brq->cmd.resp[0]);
870 return MMC_BLK_ABORT;
871 }
872
873 /*
874 * Everything else is either success, or a data error of some
875 * kind. If it was a write, we may have transitioned to
876 * program mode, which we have to wait for it to complete.
877 */
878 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
879 u32 status;
880 do {
881 int err = get_card_status(card, &status, 5);
882 if (err) {
883 printk(KERN_ERR "%s: error %d requesting status\n",
884 req->rq_disk->disk_name, err);
885 return MMC_BLK_CMD_ERR;
886 }
887 /*
888 * Some cards mishandle the status bits,
889 * so make sure to check both the busy
890 * indication and the card state.
891 */
892 } while (!(status & R1_READY_FOR_DATA) ||
893 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
894 }
895
896 if (brq->data.error) {
897 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
898 req->rq_disk->disk_name, brq->data.error,
899 (unsigned)blk_rq_pos(req),
900 (unsigned)blk_rq_sectors(req),
901 brq->cmd.resp[0], brq->stop.resp[0]);
902
903 if (rq_data_dir(req) == READ) {
904 if (brq->data.blocks > 1) {
905 /* Redo read one sector at a time */
906 pr_warning("%s: retrying using single block read\n",
907 req->rq_disk->disk_name);
908 return MMC_BLK_RETRY_SINGLE;
909 }
910 return MMC_BLK_DATA_ERR;
911 } else {
912 return MMC_BLK_CMD_ERR;
913 }
914 }
915
916 if (ret == MMC_BLK_SUCCESS &&
917 blk_rq_bytes(req) != brq->data.bytes_xfered)
918 ret = MMC_BLK_PARTIAL;
919
920 return ret;
921}
922
Per Forlina69554e42011-07-01 18:55:29 +0200923static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
924 struct mmc_card *card,
925 int disable_multi,
926 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927{
Per Forlina69554e42011-07-01 18:55:29 +0200928 u32 readcmd, writecmd;
929 struct mmc_blk_request *brq = &mqrq->brq;
930 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 struct mmc_blk_data *md = mq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500933 /*
934 * Reliable writes are used to implement Forced Unit Access and
935 * REQ_META accesses, and are supported only on MMCs.
936 */
937 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
938 (req->cmd_flags & REQ_META)) &&
939 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -0500940 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -0500941
Per Forlina69554e42011-07-01 18:55:29 +0200942 memset(brq, 0, sizeof(struct mmc_blk_request));
943 brq->mrq.cmd = &brq->cmd;
944 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945
Per Forlina69554e42011-07-01 18:55:29 +0200946 brq->cmd.arg = blk_rq_pos(req);
947 if (!mmc_card_blockaddr(card))
948 brq->cmd.arg <<= 9;
949 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
950 brq->data.blksz = 512;
951 brq->stop.opcode = MMC_STOP_TRANSMISSION;
952 brq->stop.arg = 0;
953 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
954 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955
Per Forlina69554e42011-07-01 18:55:29 +0200956 /*
957 * The block layer doesn't support all sector count
958 * restrictions, so we need to be prepared for too big
959 * requests.
960 */
961 if (brq->data.blocks > card->host->max_blk_count)
962 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963
Per Forlina69554e42011-07-01 18:55:29 +0200964 /*
965 * After a read error, we redo the request one sector at a time
966 * in order to accurately determine which sectors can be read
967 * successfully.
968 */
969 if (disable_multi && brq->data.blocks > 1)
970 brq->data.blocks = 1;
971
972 if (brq->data.blocks > 1 || do_rel_wr) {
973 /* SPI multiblock writes terminate using a special
974 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +0200975 */
Per Forlina69554e42011-07-01 18:55:29 +0200976 if (!mmc_host_is_spi(card->host) ||
977 rq_data_dir(req) == READ)
978 brq->mrq.stop = &brq->stop;
979 readcmd = MMC_READ_MULTIPLE_BLOCK;
980 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
981 } else {
982 brq->mrq.stop = NULL;
983 readcmd = MMC_READ_SINGLE_BLOCK;
984 writecmd = MMC_WRITE_BLOCK;
985 }
986 if (rq_data_dir(req) == READ) {
987 brq->cmd.opcode = readcmd;
988 brq->data.flags |= MMC_DATA_READ;
989 } else {
990 brq->cmd.opcode = writecmd;
991 brq->data.flags |= MMC_DATA_WRITE;
992 }
Pierre Ossman548d2de2009-04-10 17:52:57 +0200993
Per Forlina69554e42011-07-01 18:55:29 +0200994 if (do_rel_wr)
995 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +0100996
Per Forlina69554e42011-07-01 18:55:29 +0200997 /*
998 * Pre-defined multi-block transfers are preferable to
999 * open ended-ones (and necessary for reliable writes).
1000 * However, it is not sufficient to just send CMD23,
1001 * and avoid the final CMD12, as on an error condition
1002 * CMD12 (stop) needs to be sent anyway. This, coupled
1003 * with Auto-CMD23 enhancements provided by some
1004 * hosts, means that the complexity of dealing
1005 * with this is best left to the host. If CMD23 is
1006 * supported by card and host, we'll fill sbc in and let
1007 * the host deal with handling it correctly. This means
1008 * that for hosts that don't expose MMC_CAP_CMD23, no
1009 * change of behavior will be observed.
1010 *
1011 * N.B: Some MMC cards experience perf degradation.
1012 * We'll avoid using CMD23-bounded multiblock writes for
1013 * these, while retaining features like reliable writes.
1014 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015
Per Forlina69554e42011-07-01 18:55:29 +02001016 if ((md->flags & MMC_BLK_CMD23) &&
1017 mmc_op_multi(brq->cmd.opcode) &&
1018 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1019 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1020 brq->sbc.arg = brq->data.blocks |
1021 (do_rel_wr ? (1 << 31) : 0);
1022 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1023 brq->mrq.sbc = &brq->sbc;
1024 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001025
Per Forlina69554e42011-07-01 18:55:29 +02001026 mmc_set_data_timeout(&brq->data, card);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001027
Per Forlina69554e42011-07-01 18:55:29 +02001028 brq->data.sg = mqrq->sg;
1029 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001030
Per Forlina69554e42011-07-01 18:55:29 +02001031 /*
1032 * Adjust the sg list so it is the same size as the
1033 * request.
1034 */
1035 if (brq->data.blocks != blk_rq_sectors(req)) {
1036 int i, data_size = brq->data.blocks << 9;
1037 struct scatterlist *sg;
Pierre Ossmanb146d262007-07-24 19:16:54 +02001038
Per Forlina69554e42011-07-01 18:55:29 +02001039 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1040 data_size -= sg->length;
1041 if (data_size <= 0) {
1042 sg->length += data_size;
1043 i++;
1044 break;
Adrian Hunter6a79e392008-12-31 18:21:17 +01001045 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01001046 }
Per Forlina69554e42011-07-01 18:55:29 +02001047 brq->data.sg_len = i;
1048 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01001049
Per Forlina69554e42011-07-01 18:55:29 +02001050 mmc_queue_bounce_pre(mqrq);
1051}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052
/*
 * Issue one block-layer read/write request, retrying and degrading to
 * single-block transfers as needed until the request is fully completed
 * or aborted.
 *
 * Returns 1 when the request was completed (even partially with errors
 * reported to the block layer), 0 when it was aborted.
 */
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0;
	enum mmc_blk_status status;

	do {
		/* (Re)build the request; disable_multi may have changed. */
		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, disable_multi, mq);
		mmc_wait_for_req(card->host, &brq->mrq);

		mmc_queue_bounce_post(mq->mqrq_cur);

		status = mmc_blk_err_check(brq, req, card, md);
		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0,
						brq->data.bytes_xfered);
			spin_unlock_irq(&md->lock);
			break;
		case MMC_BLK_CMD_ERR:
			goto cmd_err;
		case MMC_BLK_RETRY_SINGLE:
			/* Retry the transfer one block at a time. */
			disable_multi = 1;
			break;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through: too many retries, abort. */
		case MMC_BLK_ABORT:
			goto cmd_abort;
		case MMC_BLK_DATA_ERR:
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, -EIO,
						brq->data.blksz);
			spin_unlock_irq(&md->lock);
			break;
		}

	} while (ret);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

 cmd_abort:
	spin_lock_irq(&md->lock);
	/* Suppress error messages when the card is simply gone. */
	if (mmc_card_removed(card))
		req->cmd_flags |= REQ_QUIET;
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
1140
San Mehatc87f8d42009-07-30 08:21:19 -07001141static int
1142mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
1143
Adrian Hunterbd788c92010-08-11 14:17:47 -07001144static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1145{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001146 int ret;
1147 struct mmc_blk_data *md = mq->data;
1148 struct mmc_card *card = md->queue.card;
1149
San Mehatc87f8d42009-07-30 08:21:19 -07001150#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1151 if (mmc_bus_needs_resume(card->host)) {
1152 mmc_resume_bus(card->host);
1153 mmc_blk_set_blksize(md, card);
1154 }
1155#endif
1156
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001157 mmc_claim_host(card->host);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001158 ret = mmc_blk_part_switch(card, md);
1159 if (ret) {
1160 ret = 0;
1161 goto out;
1162 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001163
Adrian Hunter49804542010-08-11 14:17:50 -07001164 if (req->cmd_flags & REQ_DISCARD) {
1165 if (req->cmd_flags & REQ_SECURE)
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001166 ret = mmc_blk_issue_secdiscard_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001167 else
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001168 ret = mmc_blk_issue_discard_rq(mq, req);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001169 } else if (req->cmd_flags & REQ_FLUSH) {
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001170 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001171 } else {
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001172 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001173 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001174
Andrei Warkentin371a6892011-04-11 18:10:25 -05001175out:
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001176 mmc_release_host(card->host);
1177 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001178}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Russell Kinga6f6c962006-01-03 22:38:44 +00001180static inline int mmc_blk_readonly(struct mmc_card *card)
1181{
1182 return mmc_card_readonly(card) ||
1183 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1184}
1185
Andrei Warkentin371a6892011-04-11 18:10:25 -05001186static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1187 struct device *parent,
1188 sector_t size,
1189 bool default_ro,
1190 const char *subname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191{
1192 struct mmc_blk_data *md;
1193 int devidx, ret;
1194
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001195 devidx = find_first_zero_bit(dev_use, max_devices);
1196 if (devidx >= max_devices)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 return ERR_PTR(-ENOSPC);
1198 __set_bit(devidx, dev_use);
1199
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07001200 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00001201 if (!md) {
1202 ret = -ENOMEM;
1203 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 }
Russell Kinga6f6c962006-01-03 22:38:44 +00001205
Russell Kinga6f6c962006-01-03 22:38:44 +00001206 /*
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001207 * !subname implies we are creating main mmc_blk_data that will be
1208 * associated with mmc_card with mmc_set_drvdata. Due to device
1209 * partitions, devidx will not coincide with a per-physical card
1210 * index anymore so we keep track of a name index.
1211 */
1212 if (!subname) {
1213 md->name_idx = find_first_zero_bit(name_use, max_devices);
1214 __set_bit(md->name_idx, name_use);
1215 }
1216 else
1217 md->name_idx = ((struct mmc_blk_data *)
1218 dev_to_disk(parent)->private_data)->name_idx;
1219
1220 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00001221 * Set the read-only status based on the supported commands
1222 * and the write protect switch.
1223 */
1224 md->read_only = mmc_blk_readonly(card);
1225
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001226 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00001227 if (md->disk == NULL) {
1228 ret = -ENOMEM;
1229 goto err_kfree;
1230 }
1231
1232 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001233 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00001234 md->usage = 1;
1235
Adrian Hunterd09408a2011-06-23 13:40:28 +03001236 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
Russell Kinga6f6c962006-01-03 22:38:44 +00001237 if (ret)
1238 goto err_putdisk;
1239
Russell Kinga6f6c962006-01-03 22:38:44 +00001240 md->queue.issue_fn = mmc_blk_issue_rq;
1241 md->queue.data = md;
1242
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02001243 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001244 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00001245 md->disk->fops = &mmc_bdops;
1246 md->disk->private_data = md;
1247 md->disk->queue = md->queue.queue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001248 md->disk->driverfs_dev = &card->dev;
Andrei Warkentin371a6892011-04-11 18:10:25 -05001249 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Crossfa746fa2010-09-03 12:41:21 -07001250 md->disk->flags = GENHD_FL_EXT_DEVT;
Russell Kinga6f6c962006-01-03 22:38:44 +00001251
1252 /*
1253 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1254 *
1255 * - be set for removable media with permanent block devices
1256 * - be unset for removable block devices with permanent media
1257 *
1258 * Since MMC block devices clearly fall under the second
1259 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1260 * should use the block device creation/destruction hotplug
1261 * messages to tell when the card is present.
1262 */
1263
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001264 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1265 "mmcblk%d%s", md->name_idx, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00001266
Martin K. Petersene1defc42009-05-22 17:17:49 -04001267 blk_queue_logical_block_size(md->queue.queue, 512);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001268 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001269
Andrei Warkentinf0d89972011-05-23 15:06:38 -05001270 if (mmc_host_cmd23(card->host)) {
1271 if (mmc_card_mmc(card) ||
1272 (mmc_card_sd(card) &&
1273 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1274 md->flags |= MMC_BLK_CMD23;
1275 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001276
1277 if (mmc_card_mmc(card) &&
1278 md->flags & MMC_BLK_CMD23 &&
1279 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1280 card->ext_csd.rel_sectors)) {
1281 md->flags |= MMC_BLK_REL_WR;
1282 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1283 }
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00001286
1287 err_putdisk:
1288 put_disk(md->disk);
1289 err_kfree:
1290 kfree(md);
1291 out:
1292 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293}
1294
Andrei Warkentin371a6892011-04-11 18:10:25 -05001295static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1296{
1297 sector_t size;
1298 struct mmc_blk_data *md;
1299
1300 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1301 /*
1302 * The EXT_CSD sector count is in number or 512 byte
1303 * sectors.
1304 */
1305 size = card->ext_csd.sectors;
1306 } else {
1307 /*
1308 * The CSD capacity field is in units of read_blkbits.
1309 * set_capacity takes units of 512 bytes.
1310 */
1311 size = card->csd.capacity << (card->csd.read_blkbits - 9);
1312 }
1313
1314 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
1315 return md;
1316}
1317
1318static int mmc_blk_alloc_part(struct mmc_card *card,
1319 struct mmc_blk_data *md,
1320 unsigned int part_type,
1321 sector_t size,
1322 bool default_ro,
1323 const char *subname)
1324{
1325 char cap_str[10];
1326 struct mmc_blk_data *part_md;
1327
1328 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1329 subname);
1330 if (IS_ERR(part_md))
1331 return PTR_ERR(part_md);
1332 part_md->part_type = part_type;
1333 list_add(&part_md->part, &md->part);
1334
1335 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1336 cap_str, sizeof(cap_str));
1337 printk(KERN_INFO "%s: %s %s partition %u %s\n",
1338 part_md->disk->disk_name, mmc_card_id(card),
1339 mmc_card_name(card), part_md->part_type, cap_str);
1340 return 0;
1341}
1342
1343static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1344{
1345 int ret = 0;
1346
1347 if (!mmc_card_mmc(card))
1348 return 0;
1349
1350 if (card->ext_csd.boot_size) {
1351 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
1352 card->ext_csd.boot_size >> 9,
1353 true,
1354 "boot0");
1355 if (ret)
1356 return ret;
1357 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
1358 card->ext_csd.boot_size >> 9,
1359 true,
1360 "boot1");
1361 if (ret)
1362 return ret;
1363 }
1364
1365 return ret;
1366}
1367
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368static int
1369mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
1370{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 int err;
1372
Pierre Ossmanb8558852007-01-03 19:47:29 +01001373 mmc_claim_host(card->host);
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03001374 err = mmc_set_blocklen(card, 512);
Pierre Ossmanb8558852007-01-03 19:47:29 +01001375 mmc_release_host(card->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377 if (err) {
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03001378 printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
1379 md->disk->disk_name, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 return -EINVAL;
1381 }
1382
1383 return 0;
1384}
1385
Andrei Warkentin371a6892011-04-11 18:10:25 -05001386static void mmc_blk_remove_req(struct mmc_blk_data *md)
1387{
1388 if (md) {
1389 if (md->disk->flags & GENHD_FL_UP) {
1390 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1391
1392 /* Stop new requests from getting into the queue */
1393 del_gendisk(md->disk);
1394 }
1395
1396 /* Then flush out any already in there */
1397 mmc_cleanup_queue(&md->queue);
1398 mmc_blk_put(md);
1399 }
1400}
1401
1402static void mmc_blk_remove_parts(struct mmc_card *card,
1403 struct mmc_blk_data *md)
1404{
1405 struct list_head *pos, *q;
1406 struct mmc_blk_data *part_md;
1407
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001408 __clear_bit(md->name_idx, name_use);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001409 list_for_each_safe(pos, q, &md->part) {
1410 part_md = list_entry(pos, struct mmc_blk_data, part);
1411 list_del(pos);
1412 mmc_blk_remove_req(part_md);
1413 }
1414}
1415
1416static int mmc_add_disk(struct mmc_blk_data *md)
1417{
1418 int ret;
1419
1420 add_disk(md->disk);
1421 md->force_ro.show = force_ro_show;
1422 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05301423 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001424 md->force_ro.attr.name = "force_ro";
1425 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1426 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1427 if (ret)
1428 del_gendisk(md->disk);
1429
1430 return ret;
1431}
1432
/*
 * Per-card quirk table, applied at probe time via mmc_fixup_device().
 */
static const struct mmc_fixup blk_fixups[] =
{
	/* Cards needing the MMC_QUIRK_INAND_CMD38 handling. */
	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	END_FIXUP
};
1457
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458static int mmc_blk_probe(struct mmc_card *card)
1459{
Andrei Warkentin371a6892011-04-11 18:10:25 -05001460 struct mmc_blk_data *md, *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 int err;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02001462 char cap_str[10];
1463
Pierre Ossman912490d2005-05-21 10:27:02 +01001464 /*
1465 * Check that the card supports the command class(es) we need.
1466 */
1467 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 return -ENODEV;
1469
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 md = mmc_blk_alloc(card);
1471 if (IS_ERR(md))
1472 return PTR_ERR(md);
1473
1474 err = mmc_blk_set_blksize(md, card);
1475 if (err)
1476 goto out;
1477
Yi Li444122f2009-02-05 15:31:57 +08001478 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02001479 cap_str, sizeof(cap_str));
1480 printk(KERN_INFO "%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02001482 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Andrei Warkentin371a6892011-04-11 18:10:25 -05001484 if (mmc_blk_alloc_parts(card, md))
1485 goto out;
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 mmc_set_drvdata(card, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04001488 mmc_fixup_device(card, blk_fixups);
1489
San Mehatc87f8d42009-07-30 08:21:19 -07001490#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1491 mmc_set_bus_resume_policy(card->host, 1);
1492#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05001493 if (mmc_add_disk(md))
1494 goto out;
1495
1496 list_for_each_entry(part_md, &md->part, part) {
1497 if (mmc_add_disk(part_md))
1498 goto out;
1499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 return 0;
1501
1502 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05001503 mmc_blk_remove_parts(card, md);
1504 mmc_blk_remove_req(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 return err;
1506}
1507
/*
 * Bus remove: tear down all block devices for the card.  Partition
 * devices go first, then the card is switched back to its main
 * partition before the main device is removed.
 */
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	/* Restore the main partition selection while we still hold md. */
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 0);
#endif
}
1522
1523#ifdef CONFIG_PM
Chuanxiao Dong72407e92011-08-24 14:00:41 +05301524static int mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525{
Andrei Warkentin371a6892011-04-11 18:10:25 -05001526 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 struct mmc_blk_data *md = mmc_get_drvdata(card);
1528
1529 if (md) {
1530 mmc_queue_suspend(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001531 list_for_each_entry(part_md, &md->part, part) {
1532 mmc_queue_suspend(&part_md->queue);
1533 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 }
1535 return 0;
1536}
1537
1538static int mmc_blk_resume(struct mmc_card *card)
1539{
Andrei Warkentin371a6892011-04-11 18:10:25 -05001540 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 struct mmc_blk_data *md = mmc_get_drvdata(card);
1542
1543 if (md) {
San Mehatc87f8d42009-07-30 08:21:19 -07001544#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 mmc_blk_set_blksize(md, card);
San Mehatc87f8d42009-07-30 08:21:19 -07001546#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05001547
1548 /*
1549 * Resume involves the card going into idle state,
1550 * so current partition is always the main one.
1551 */
1552 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001554 list_for_each_entry(part_md, &md->part, part) {
1555 mmc_queue_resume(&part_md->queue);
1556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
1558 return 0;
1559}
1560#else
1561#define mmc_blk_suspend NULL
1562#define mmc_blk_resume NULL
1563#endif
1564
/* MMC bus driver glue binding this block front end to card hotplug/PM. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
1574
1575static int __init mmc_blk_init(void)
1576{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09001577 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001579 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1580 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1581
1582 max_devices = 256 / perdev_minors;
1583
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02001584 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1585 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09001588 res = mmc_register_driver(&mmc_driver);
1589 if (res)
1590 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09001592 return 0;
1593 out2:
1594 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 out:
1596 return res;
1597}
1598
/* Module exit: unregister in the reverse order of mmc_blk_init(). */
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
1604
1605module_init(mmc_blk_init);
1606module_exit(mmc_blk_exit);
1607
1608MODULE_LICENSE("GPL");
1609MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
1610