/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */

#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}

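/*
 * Writing "1" to the ro_lock attribute sets the power-on write protect
 * bit (EXT_CSD_BOOT_WP_B_PWR_WP_EN) for the boot area, so the boot
 * partition(s) stay read-only until the next power cycle.
 */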
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
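	/*
	 * The card has no real CHS geometry; report a synthetic one of
	 * 4 heads and 16 sectors per track (64 sectors per cylinder) so
	 * legacy tools asking for a geometry get consistent numbers.
	 */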
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}

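/*
 * Ask an SD card how many blocks of the last write completed successfully:
 * send APP_CMD followed by ACMD22 (SEND_NUM_WR_BLKS) and read back the
 * 32-bit big-endian block count.  Returns (u32)-1 on any failure.
 */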
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg, trim_arg, erase_arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		erase_arg = MMC_ERASE_ARG;
		trim_arg = MMC_TRIM_ARG;
	} else {
		erase_arg = MMC_SECURE_ERASE_ARG;
		trim_arg = MMC_SECURE_TRIM1_ARG;
	}

	if (mmc_erase_group_aligned(card, from, nr))
		arg = erase_arg;
	else if (mmc_can_trim(card))
		arg = trim_arg;
	else {
		err = -EINVAL;
		goto out;
	}
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

	if (mmc_can_sanitize(card))
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state!"\
					" %s %s\n", mmc_hostname(card->host),
					req->rq_disk->disk_name, __func__);

				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	BUG_ON(!packed);

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B.: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

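/*
 * Work out how many scatterlist segments are needed to hold the packed
 * command header (4 KB for large-sector cards, 512 bytes otherwise),
 * given the queue's maximum segment size.
 */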
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}

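/*
 * Try to collect further queued requests into a packed group behind @req.
 * Packing stops when the direction changes, a discard/flush request is
 * seen, a reliable-write constraint applies, or the host's sector or
 * segment limits would be exceeded.  Returns the number of packed
 * entries, or 0 if the request should be issued on its own.
 */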
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}

static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

1535 packed_cmd_hdr = packed->cmd_hdr;
1536 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
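	/* Header word 0: number of packed entries, R/W flag and version */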
1537 packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1538 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1539 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1540
1541 /*
1542	 * Set the arguments for each entry of the packed group
1543 */
1544 list_for_each_entry(prq, &packed->list, queuelist) {
1545 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1546 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1547 (prq->cmd_flags & REQ_META) &&
1548 (rq_data_dir(prq) == WRITE) &&
1549 ((brq->data.blocks * brq->data.blksz) >=
1550 card->ext_csd.data_tag_unit_size);
1551 /* Argument of CMD23 */
1552 packed_cmd_hdr[(i * 2)] =
1553 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1554 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1555 blk_rq_sectors(prq);
1556 /* Argument of CMD18 or CMD25 */
1557 packed_cmd_hdr[((i * 2)) + 1] =
1558 mmc_card_blockaddr(card) ?
1559 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1560 packed->blocks += blk_rq_sectors(prq);
1561 i++;
1562 }
1563
1564 memset(brq, 0, sizeof(struct mmc_blk_request));
1565 brq->mrq.cmd = &brq->cmd;
1566 brq->mrq.data = &brq->data;
1567 brq->mrq.sbc = &brq->sbc;
1568 brq->mrq.stop = &brq->stop;
1569
1570 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1571 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1572 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1573
1574 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1575 brq->cmd.arg = blk_rq_pos(req);
1576 if (!mmc_card_blockaddr(card))
1577 brq->cmd.arg <<= 9;
1578 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1579
1580 brq->data.blksz = 512;
1581 brq->data.blocks = packed->blocks + hdr_blocks;
1582 brq->data.flags |= MMC_DATA_WRITE;
1583
1584 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1585 brq->stop.arg = 0;
1586 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1587
1588 mmc_set_data_timeout(&brq->data, card);
1589
1590 brq->data.sg = mqrq->sg;
1591 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1592
1593 mqrq->mmc_active.mrq = &brq->mrq;
1594 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1595
1596 mmc_queue_bounce_pre(mqrq);
1597}
1598
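/*
 * After a command error, complete the part of the request that is known
 * to have been written: for SD cards ask the card how many blocks were
 * programmed; otherwise trust the byte count reported by the controller
 * (packed requests are completed by the packed error path instead).
 */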
Adrian Hunter67716322011-08-29 16:42:15 +03001599static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1600 struct mmc_blk_request *brq, struct request *req,
1601 int ret)
1602{
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001603 struct mmc_queue_req *mq_rq;
1604 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1605
Adrian Hunter67716322011-08-29 16:42:15 +03001606 /*
1607 * If this is an SD card and we're writing, we can first
1608 * mark the known good sectors as ok.
1609 *
1610	 * If the card is not SD, we can still acknowledge written sectors
1611 * as reported by the controller (which might be less than
1612 * the real number of written sectors, but never more).
1613 */
1614 if (mmc_card_sd(card)) {
1615 u32 blocks;
1616
1617 blocks = mmc_sd_num_wr_blocks(card);
1618 if (blocks != (u32)-1) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301619 ret = blk_end_request(req, 0, blocks << 9);
Adrian Hunter67716322011-08-29 16:42:15 +03001620 }
1621 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001622 if (!mmc_packed_cmd(mq_rq->cmd_type))
1623 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03001624 }
1625 return ret;
1626}
1627
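/*
 * Complete the entries of a packed request that finished successfully.
 * Entries before idx_failure are ended with success; the failing entry
 * becomes the current request and 1 is returned so the remainder can be
 * retried.  Returns 0 when every entry was completed.
 */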
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001628static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1629{
1630 struct request *prq;
1631 struct mmc_packed *packed = mq_rq->packed;
1632 int idx = packed->idx_failure, i = 0;
1633 int ret = 0;
1634
1635 BUG_ON(!packed);
1636
1637 while (!list_empty(&packed->list)) {
1638 prq = list_entry_rq(packed->list.next);
1639 if (idx == i) {
1640 /* retry from error index */
1641 packed->nr_entries -= idx;
1642 mq_rq->req = prq;
1643 ret = 1;
1644
1645 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1646 list_del_init(&prq->queuelist);
1647 mmc_blk_clear_packed(mq_rq);
1648 }
1649 return ret;
1650 }
1651 list_del_init(&prq->queuelist);
1652 blk_end_request(prq, 0, blk_rq_bytes(prq));
1653 i++;
1654 }
1655
1656 mmc_blk_clear_packed(mq_rq);
1657 return ret;
1658}
1659
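/* Fail every remaining entry of a packed request with -EIO. */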
1660static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1661{
1662 struct request *prq;
1663 struct mmc_packed *packed = mq_rq->packed;
1664
1665 BUG_ON(!packed);
1666
1667 while (!list_empty(&packed->list)) {
1668 prq = list_entry_rq(packed->list.next);
1669 list_del_init(&prq->queuelist);
1670 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1671 }
1672
1673 mmc_blk_clear_packed(mq_rq);
1674}
1675
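/*
 * Give an unissued packed request back to the block layer: the gathered
 * entries are requeued from the tail so they are fetched again in their
 * original order, while the head entry (the current request) is only
 * unlinked since the caller still owns it.
 */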
1676static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1677 struct mmc_queue_req *mq_rq)
1678{
1679 struct request *prq;
1680 struct request_queue *q = mq->queue;
1681 struct mmc_packed *packed = mq_rq->packed;
1682
1683 BUG_ON(!packed);
1684
1685 while (!list_empty(&packed->list)) {
1686 prq = list_entry_rq(packed->list.prev);
1687 if (prq->queuelist.prev != &packed->list) {
1688 list_del_init(&prq->queuelist);
1689 spin_lock_irq(q->queue_lock);
1690 blk_requeue_request(mq->queue, prq);
1691 spin_unlock_irq(q->queue_lock);
1692 } else {
1693 list_del_init(&prq->queuelist);
1694 }
1695 }
1696
1697 mmc_blk_clear_packed(mq_rq);
1698}
1699
Per Forlinee8a43a2011-07-01 18:55:33 +02001700static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02001701{
1702 struct mmc_blk_data *md = mq->data;
1703 struct mmc_card *card = md->queue.card;
1704 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunter67716322011-08-29 16:42:15 +03001705 int ret = 1, disable_multi = 0, retry = 0, type;
Per Forlind78d4a82011-07-01 18:55:30 +02001706 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02001707 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05301708 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02001709 struct mmc_async_req *areq;
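	/* A packed command needs at least this many entries on the list */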
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001710 const u8 packed_nr = 2;
1711 u8 reqs = 0;
Per Forlinee8a43a2011-07-01 18:55:33 +02001712
1713 if (!rqc && !mq->mqrq_prev->req)
1714 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02001715
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001716 if (rqc)
1717 reqs = mmc_blk_prep_packed_list(mq, rqc);
1718
Per Forlin54d49d72011-07-01 18:55:29 +02001719 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02001720 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05301721 /*
1722			 * When 4KB native sector is enabled, only reads and
1723			 * writes that are a multiple of 8 blocks are allowed
1724 */
1725 if ((brq->data.blocks & 0x07) &&
1726 (card->ext_csd.data_sector_size == 4096)) {
1727 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1728 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001729 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05301730 goto cmd_abort;
1731 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001732
1733 if (reqs >= packed_nr)
1734 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1735 card, mq);
1736 else
1737 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02001738 areq = &mq->mqrq_cur->mmc_active;
1739 } else
1740 areq = NULL;
1741 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001742 if (!areq) {
1743 if (status == MMC_BLK_NEW_REQUEST)
1744 mq->flags |= MMC_QUEUE_NEW_REQUEST;
Per Forlinee8a43a2011-07-01 18:55:33 +02001745 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001746 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02001747
Per Forlinee8a43a2011-07-01 18:55:33 +02001748 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1749 brq = &mq_rq->brq;
1750 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03001751 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02001752 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02001753
Per Forlind78d4a82011-07-01 18:55:30 +02001754 switch (status) {
1755 case MMC_BLK_SUCCESS:
1756 case MMC_BLK_PARTIAL:
1757 /*
1758 * A block was successfully transferred.
1759 */
Adrian Hunter67716322011-08-29 16:42:15 +03001760 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001761
1762 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1763 ret = mmc_blk_end_packed_req(mq_rq);
1764 break;
1765 } else {
1766 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02001767 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001768 }
1769
Adrian Hunter67716322011-08-29 16:42:15 +03001770 /*
1771 * If the blk_end_request function returns non-zero even
1772 * though all data has been transferred and no errors
1773 * were returned by the host controller, it's a bug.
1774 */
Per Forlinee8a43a2011-07-01 18:55:33 +02001775 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05301776 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02001777 __func__, blk_rq_bytes(req),
1778 brq->data.bytes_xfered);
1779 rqc = NULL;
1780 goto cmd_abort;
1781 }
Per Forlind78d4a82011-07-01 18:55:30 +02001782 break;
1783 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03001784 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1785 if (!mmc_blk_reset(md, card->host, type))
1786 break;
1787 goto cmd_abort;
Per Forlind78d4a82011-07-01 18:55:30 +02001788 case MMC_BLK_RETRY:
1789 if (retry++ < 5)
Russell King - ARM Linuxa01f3cc2011-06-20 20:10:28 +01001790 break;
Adrian Hunter67716322011-08-29 16:42:15 +03001791 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02001792 case MMC_BLK_ABORT:
Adrian Hunter67716322011-08-29 16:42:15 +03001793 if (!mmc_blk_reset(md, card->host, type))
1794 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01001795 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03001796 case MMC_BLK_DATA_ERR: {
1797 int err;
1798
1799 err = mmc_blk_reset(md, card->host, type);
1800 if (!err)
1801 break;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001802 if (err == -ENODEV ||
1803 mmc_packed_cmd(mq_rq->cmd_type))
Adrian Hunter67716322011-08-29 16:42:15 +03001804 goto cmd_abort;
1805 /* Fall through */
1806 }
1807 case MMC_BLK_ECC_ERR:
1808 if (brq->data.blocks > 1) {
1809 /* Redo read one sector at a time */
1810 pr_warning("%s: retrying using single block read\n",
1811 req->rq_disk->disk_name);
1812 disable_multi = 1;
1813 break;
1814 }
Per Forlind78d4a82011-07-01 18:55:30 +02001815 /*
1816 * After an error, we redo I/O one sector at a
1817 * time, so we only reach here after trying to
1818 * read a single sector.
1819 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301820 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02001821 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02001822 if (!ret)
1823 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02001824 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301825 case MMC_BLK_NOMEDIUM:
1826 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001827 default:
1828			pr_err("%s: Unhandled return value (%d)\n",
1829 req->rq_disk->disk_name, status);
1830 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01001831 }
1832
Per Forlinee8a43a2011-07-01 18:55:33 +02001833 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001834 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1835 if (!mq_rq->packed->retries)
1836 goto cmd_abort;
1837 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1838 mmc_start_req(card->host,
1839 &mq_rq->mmc_active, NULL);
1840 } else {
1841
1842 /*
1843				 * In case of an incomplete request
1844 * prepare it again and resend.
1845 */
1846 mmc_blk_rw_rq_prep(mq_rq, card,
1847 disable_multi, mq);
1848 mmc_start_req(card->host,
1849 &mq_rq->mmc_active, NULL);
1850 }
Per Forlinee8a43a2011-07-01 18:55:33 +02001851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 } while (ret);
1853
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 return 1;
1855
Russell King - ARM Linuxa01f3cc2011-06-20 20:10:28 +01001856 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001857 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1858 mmc_blk_abort_packed_req(mq_rq);
1859 } else {
1860 if (mmc_card_removed(card))
1861 req->cmd_flags |= REQ_QUIET;
1862 while (ret)
1863 ret = blk_end_request(req, -EIO,
1864 blk_rq_cur_bytes(req));
1865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
Per Forlinee8a43a2011-07-01 18:55:33 +02001867 start_new_req:
1868 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09001869 if (mmc_card_removed(card)) {
1870 rqc->cmd_flags |= REQ_QUIET;
1871 blk_end_request_all(rqc, -EIO);
1872 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001873 /*
1874			 * If the current request is packed, it needs to be put back.
1875 */
1876 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
1877 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
1878
Seungwon Jeon7a819022013-01-22 19:48:07 +09001879 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1880 mmc_start_req(card->host,
1881 &mq->mqrq_cur->mmc_active, NULL);
1882 }
Per Forlinee8a43a2011-07-01 18:55:33 +02001883 }
1884
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 return 0;
1886}
1887
Adrian Hunterbd788c92010-08-11 14:17:47 -07001888static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1889{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001890 int ret;
1891 struct mmc_blk_data *md = mq->data;
1892 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001893 struct mmc_host *host = card->host;
1894 unsigned long flags;
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001895
Per Forlinee8a43a2011-07-01 18:55:33 +02001896 if (req && !mq->mqrq_prev->req)
1897 /* claim host only for the first request */
1898 mmc_claim_host(card->host);
1899
Andrei Warkentin371a6892011-04-11 18:10:25 -05001900 ret = mmc_blk_part_switch(card, md);
1901 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001902 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301903 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001904 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05001905 ret = 0;
1906 goto out;
1907 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001908
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001909 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
Per Forlinee8a43a2011-07-01 18:55:33 +02001910 if (req && req->cmd_flags & REQ_DISCARD) {
1911 /* complete ongoing async transfer before issuing discard */
1912 if (card->host->areq)
1913 mmc_blk_issue_rw_rq(mq, NULL);
Ian Chen3550ccd2012-08-29 15:05:36 +09001914 if (req->cmd_flags & REQ_SECURE &&
1915 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001916 ret = mmc_blk_issue_secdiscard_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001917 else
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001918 ret = mmc_blk_issue_discard_rq(mq, req);
Per Forlinee8a43a2011-07-01 18:55:33 +02001919 } else if (req && req->cmd_flags & REQ_FLUSH) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09001920 /* complete ongoing async transfer before issuing flush */
1921 if (card->host->areq)
1922 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001923 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001924 } else {
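		/*
		 * Nothing new to issue while an async request is in flight:
		 * flag it so the queue can signal MMC_BLK_NEW_REQUEST when
		 * more work arrives.
		 */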
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001925 if (!req && host->areq) {
1926 spin_lock_irqsave(&host->context_info.lock, flags);
1927 host->context_info.is_waiting_last_req = true;
1928 spin_unlock_irqrestore(&host->context_info.lock, flags);
1929 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001930 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001931 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001932
Andrei Warkentin371a6892011-04-11 18:10:25 -05001933out:
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09001934 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
1935 (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
1936 /*
1937 * Release host when there are no more requests
1938	 * and after a special request (discard, flush) is done.
1939	 * In the case of a special request, there is no reentry to
1940	 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
1941 */
Per Forlinee8a43a2011-07-01 18:55:33 +02001942 mmc_release_host(card->host);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001943 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001944}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Russell Kinga6f6c962006-01-03 22:38:44 +00001946static inline int mmc_blk_readonly(struct mmc_card *card)
1947{
1948 return mmc_card_readonly(card) ||
1949 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1950}
1951
Andrei Warkentin371a6892011-04-11 18:10:25 -05001952static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1953 struct device *parent,
1954 sector_t size,
1955 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01001956 const char *subname,
1957 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958{
1959 struct mmc_blk_data *md;
1960 int devidx, ret;
1961
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001962 devidx = find_first_zero_bit(dev_use, max_devices);
1963 if (devidx >= max_devices)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 return ERR_PTR(-ENOSPC);
1965 __set_bit(devidx, dev_use);
1966
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07001967 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00001968 if (!md) {
1969 ret = -ENOMEM;
1970 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 }
Russell Kinga6f6c962006-01-03 22:38:44 +00001972
Russell Kinga6f6c962006-01-03 22:38:44 +00001973 /*
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001974	 * !subname implies we are creating the main mmc_blk_data that will be
1975	 * associated with the mmc_card via mmc_set_drvdata. Due to device
1976	 * partitions, devidx no longer coincides with a per-physical-card
1977	 * index, so we keep track of a separate name index.
1978 */
1979 if (!subname) {
1980 md->name_idx = find_first_zero_bit(name_use, max_devices);
1981 __set_bit(md->name_idx, name_use);
Johan Rudholmadd710e2011-12-02 08:51:06 +01001982 } else
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001983 md->name_idx = ((struct mmc_blk_data *)
1984 dev_to_disk(parent)->private_data)->name_idx;
1985
Johan Rudholmadd710e2011-12-02 08:51:06 +01001986 md->area_type = area_type;
1987
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001988 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00001989 * Set the read-only status based on the supported commands
1990 * and the write protect switch.
1991 */
1992 md->read_only = mmc_blk_readonly(card);
1993
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001994 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00001995 if (md->disk == NULL) {
1996 ret = -ENOMEM;
1997 goto err_kfree;
1998 }
1999
2000 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002001 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00002002 md->usage = 1;
2003
Adrian Hunterd09408a2011-06-23 13:40:28 +03002004 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
Russell Kinga6f6c962006-01-03 22:38:44 +00002005 if (ret)
2006 goto err_putdisk;
2007
Russell Kinga6f6c962006-01-03 22:38:44 +00002008 md->queue.issue_fn = mmc_blk_issue_rq;
2009 md->queue.data = md;
2010
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002011 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002012 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00002013 md->disk->fops = &mmc_bdops;
2014 md->disk->private_data = md;
2015 md->disk->queue = md->queue.queue;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002016 md->disk->driverfs_dev = parent;
2017 set_disk_ro(md->disk, md->read_only || default_ro);
Loic Pallardy53d8f972012-08-06 17:12:28 +02002018 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2019 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00002020
2021 /*
2022 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2023 *
2024 * - be set for removable media with permanent block devices
2025 * - be unset for removable block devices with permanent media
2026 *
2027 * Since MMC block devices clearly fall under the second
2028 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2029 * should use the block device creation/destruction hotplug
2030 * messages to tell when the card is present.
2031 */
2032
Andrei Warkentinf06c9152011-04-21 22:46:13 -05002033 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2034 "mmcblk%d%s", md->name_idx, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00002035
Saugata Dasa5075eb2012-05-17 16:32:21 +05302036 if (mmc_card_mmc(card))
2037 blk_queue_logical_block_size(md->queue.queue,
2038 card->ext_csd.data_sector_size);
2039 else
2040 blk_queue_logical_block_size(md->queue.queue, 512);
2041
Andrei Warkentin371a6892011-04-11 18:10:25 -05002042 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002043
Andrei Warkentinf0d89972011-05-23 15:06:38 -05002044 if (mmc_host_cmd23(card->host)) {
2045 if (mmc_card_mmc(card) ||
2046 (mmc_card_sd(card) &&
2047 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2048 md->flags |= MMC_BLK_CMD23;
2049 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002050
2051 if (mmc_card_mmc(card) &&
2052 md->flags & MMC_BLK_CMD23 &&
2053 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2054 card->ext_csd.rel_sectors)) {
2055 md->flags |= MMC_BLK_REL_WR;
2056 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2057 }
2058
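	/*
	 * Packed commands are enabled only for the main data area of eMMC
	 * cards that support CMD23 and have the packed exception event
	 * enabled in EXT_CSD.
	 */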
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002059 if (mmc_card_mmc(card) &&
2060 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2061 (md->flags & MMC_BLK_CMD23) &&
2062 card->ext_csd.packed_event_en) {
2063 if (!mmc_packed_init(&md->queue, card))
2064 md->flags |= MMC_BLK_PACKED_CMD;
2065 }
2066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00002068
2069 err_putdisk:
2070 put_disk(md->disk);
2071 err_kfree:
2072 kfree(md);
2073 out:
2074 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075}
2076
Andrei Warkentin371a6892011-04-11 18:10:25 -05002077static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2078{
2079 sector_t size;
2080 struct mmc_blk_data *md;
2081
2082 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2083 /*
2084		 * The EXT_CSD sector count is in number of 512 byte
2085 * sectors.
2086 */
2087 size = card->ext_csd.sectors;
2088 } else {
2089 /*
2090 * The CSD capacity field is in units of read_blkbits.
2091 * set_capacity takes units of 512 bytes.
2092 */
2093 size = card->csd.capacity << (card->csd.read_blkbits - 9);
2094 }
2095
Johan Rudholmadd710e2011-12-02 08:51:06 +01002096 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2097 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002098 return md;
2099}
2100
2101static int mmc_blk_alloc_part(struct mmc_card *card,
2102 struct mmc_blk_data *md,
2103 unsigned int part_type,
2104 sector_t size,
2105 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002106 const char *subname,
2107 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05002108{
2109 char cap_str[10];
2110 struct mmc_blk_data *part_md;
2111
2112 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002113 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002114 if (IS_ERR(part_md))
2115 return PTR_ERR(part_md);
2116 part_md->part_type = part_type;
2117 list_add(&part_md->part, &md->part);
2118
2119 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2120 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05302121 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05002122 part_md->disk->disk_name, mmc_card_id(card),
2123 mmc_card_name(card), part_md->part_type, cap_str);
2124 return 0;
2125}
2126
Namjae Jeone0c368d2011-10-06 23:41:38 +09002127/* MMC Physical partitions consist of two boot partitions and
2128 * up to four general purpose partitions.
2129 * For each partition enabled in EXT_CSD a block device will be allocated
2130 * to provide access to the partition.
2131 */
2132
Andrei Warkentin371a6892011-04-11 18:10:25 -05002133static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2134{
Namjae Jeone0c368d2011-10-06 23:41:38 +09002135 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002136
2137 if (!mmc_card_mmc(card))
2138 return 0;
2139
Namjae Jeone0c368d2011-10-06 23:41:38 +09002140 for (idx = 0; idx < card->nr_parts; idx++) {
2141 if (card->part[idx].size) {
2142 ret = mmc_blk_alloc_part(card, md,
2143 card->part[idx].part_cfg,
2144 card->part[idx].size >> 9,
2145 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002146 card->part[idx].name,
2147 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09002148 if (ret)
2149 return ret;
2150 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002151 }
2152
2153 return ret;
2154}
2155
Andrei Warkentin371a6892011-04-11 18:10:25 -05002156static void mmc_blk_remove_req(struct mmc_blk_data *md)
2157{
Johan Rudholmadd710e2011-12-02 08:51:06 +01002158 struct mmc_card *card;
2159
Andrei Warkentin371a6892011-04-11 18:10:25 -05002160 if (md) {
Johan Rudholmadd710e2011-12-02 08:51:06 +01002161 card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002162 if (md->disk->flags & GENHD_FL_UP) {
2163 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01002164 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2165 card->ext_csd.boot_ro_lockable)
2166 device_remove_file(disk_to_dev(md->disk),
2167 &md->power_ro_lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002168
2169 /* Stop new requests from getting into the queue */
2170 del_gendisk(md->disk);
2171 }
2172
2173 /* Then flush out any already in there */
2174 mmc_cleanup_queue(&md->queue);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002175 if (md->flags & MMC_BLK_PACKED_CMD)
2176 mmc_packed_clean(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002177 mmc_blk_put(md);
2178 }
2179}
2180
2181static void mmc_blk_remove_parts(struct mmc_card *card,
2182 struct mmc_blk_data *md)
2183{
2184 struct list_head *pos, *q;
2185 struct mmc_blk_data *part_md;
2186
Andrei Warkentinf06c9152011-04-21 22:46:13 -05002187 __clear_bit(md->name_idx, name_use);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002188 list_for_each_safe(pos, q, &md->part) {
2189 part_md = list_entry(pos, struct mmc_blk_data, part);
2190 list_del(pos);
2191 mmc_blk_remove_req(part_md);
2192 }
2193}
2194
2195static int mmc_add_disk(struct mmc_blk_data *md)
2196{
2197 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01002198 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002199
2200 add_disk(md->disk);
2201 md->force_ro.show = force_ro_show;
2202 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05302203 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002204 md->force_ro.attr.name = "force_ro";
2205 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2206 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2207 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01002208 goto force_ro_fail;
2209
2210 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2211 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04002212 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01002213
2214 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2215 mode = S_IRUGO;
2216 else
2217 mode = S_IRUGO | S_IWUSR;
2218
2219 md->power_ro_lock.show = power_ro_lock_show;
2220 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01002221 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01002222 md->power_ro_lock.attr.mode = mode;
2223 md->power_ro_lock.attr.name =
2224 "ro_lock_until_next_power_on";
2225 ret = device_create_file(disk_to_dev(md->disk),
2226 &md->power_ro_lock);
2227 if (ret)
2228 goto power_ro_lock_fail;
2229 }
2230 return ret;
2231
2232power_ro_lock_fail:
2233 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2234force_ro_fail:
2235 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002236
2237 return ret;
2238}
2239
Chris Ballc59d4472011-11-11 22:01:43 -05002240#define CID_MANFID_SANDISK 0x2
2241#define CID_MANFID_TOSHIBA 0x11
2242#define CID_MANFID_MICRON 0x13
Ian Chen3550ccd2012-08-29 15:05:36 +09002243#define CID_MANFID_SAMSUNG 0x15
Chris Ballc59d4472011-11-11 22:01:43 -05002244
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002245static const struct mmc_fixup blk_fixups[] =
2246{
Chris Ballc59d4472011-11-11 22:01:43 -05002247 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2248 MMC_QUIRK_INAND_CMD38),
2249 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2250 MMC_QUIRK_INAND_CMD38),
2251 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2252 MMC_QUIRK_INAND_CMD38),
2253 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2254 MMC_QUIRK_INAND_CMD38),
2255 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2256 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002257
2258 /*
2259 * Some MMC cards experience performance degradation with CMD23
2260 * instead of CMD12-bounded multiblock transfers. For now we'll
2261 * black list what's bad...
2262 * - Certain Toshiba cards.
2263 *
2264 * N.B. This doesn't affect SD cards.
2265 */
Chris Ballc59d4472011-11-11 22:01:43 -05002266 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002267 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05002268 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002269 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05002270 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002271 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002272
2273 /*
2274 * Some Micron MMC cards need a longer data read timeout than
2275 * indicated in CSD.
2276 */
Chris Ballc59d4472011-11-11 22:01:43 -05002277 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002278 MMC_QUIRK_LONG_READ_TIME),
2279
Ian Chen3550ccd2012-08-29 15:05:36 +09002280 /*
2281 * On these Samsung MoviNAND parts, performing secure erase or
2282 * secure trim can result in unrecoverable corruption due to a
2283 * firmware bug.
2284 */
2285 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2286 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2287 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2288 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2289 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2290 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2291 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2292 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2293 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2294 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2295 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2296 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2297 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2298 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2299 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2300 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2301
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002302 END_FIXUP
2303};
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305static int mmc_blk_probe(struct mmc_card *card)
2306{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002307 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002308 char cap_str[10];
2309
Pierre Ossman912490d2005-05-21 10:27:02 +01002310 /*
2311 * Check that the card supports the command class(es) we need.
2312 */
2313 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 return -ENODEV;
2315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 md = mmc_blk_alloc(card);
2317 if (IS_ERR(md))
2318 return PTR_ERR(md);
2319
Yi Li444122f2009-02-05 15:31:57 +08002320 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002321 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05302322 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002324 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Andrei Warkentin371a6892011-04-11 18:10:25 -05002326 if (mmc_blk_alloc_parts(card, md))
2327 goto out;
2328
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 mmc_set_drvdata(card, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002330 mmc_fixup_device(card, blk_fixups);
2331
Andrei Warkentin371a6892011-04-11 18:10:25 -05002332 if (mmc_add_disk(md))
2333 goto out;
2334
2335 list_for_each_entry(part_md, &md->part, part) {
2336 if (mmc_add_disk(part_md))
2337 goto out;
2338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 return 0;
2340
2341 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05002342 mmc_blk_remove_parts(card, md);
2343 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01002344 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345}
2346
2347static void mmc_blk_remove(struct mmc_card *card)
2348{
2349 struct mmc_blk_data *md = mmc_get_drvdata(card);
2350
Andrei Warkentin371a6892011-04-11 18:10:25 -05002351 mmc_blk_remove_parts(card, md);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03002352 mmc_claim_host(card->host);
2353 mmc_blk_part_switch(card, md);
2354 mmc_release_host(card->host);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002355 mmc_blk_remove_req(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 mmc_set_drvdata(card, NULL);
2357}
2358
2359#ifdef CONFIG_PM
Chuanxiao Dong32d317c2012-04-11 19:54:38 +08002360static int mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002362 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 struct mmc_blk_data *md = mmc_get_drvdata(card);
2364
2365 if (md) {
2366 mmc_queue_suspend(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002367 list_for_each_entry(part_md, &md->part, part) {
2368 mmc_queue_suspend(&part_md->queue);
2369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 }
2371 return 0;
2372}
2373
2374static int mmc_blk_resume(struct mmc_card *card)
2375{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002376 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 struct mmc_blk_data *md = mmc_get_drvdata(card);
2378
2379 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05002380 /*
2381 * Resume involves the card going into idle state,
2382 * so current partition is always the main one.
2383 */
2384 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002386 list_for_each_entry(part_md, &md->part, part) {
2387 mmc_queue_resume(&part_md->queue);
2388 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 }
2390 return 0;
2391}
2392#else
2393#define mmc_blk_suspend NULL
2394#define mmc_blk_resume NULL
2395#endif
2396
2397static struct mmc_driver mmc_driver = {
2398 .drv = {
2399 .name = "mmcblk",
2400 },
2401 .probe = mmc_blk_probe,
2402 .remove = mmc_blk_remove,
2403 .suspend = mmc_blk_suspend,
2404 .resume = mmc_blk_resume,
2405};
2406
2407static int __init mmc_blk_init(void)
2408{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002409 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002411 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2412 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2413
2414 max_devices = 256 / perdev_minors;
2415
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002416 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2417 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002420 res = mmc_register_driver(&mmc_driver);
2421 if (res)
2422 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002424 return 0;
2425 out2:
2426 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 out:
2428 return res;
2429}
2430
2431static void __exit mmc_blk_exit(void)
2432{
2433 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002434 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435}
2436
2437module_init(mmc_blk_init);
2438module_exit(mmc_blk_exit);
2439
2440MODULE_LICENSE("GPL");
2441MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2442