/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

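/*
 * A request qualifies as a "reliable write" candidate when it is a WRITE
 * carrying REQ_FUA and/or REQ_META; callers such as mmc_blk_rw_rq_prep()
 * additionally require MMC_BLK_REL_WR to be set for the device.
 */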
#define mmc_req_rel_wr(req)     (((req->cmd_flags & REQ_FUA) || \
                                  (req->cmd_flags & REQ_META)) && \
                                 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER  0x01
#define PACKED_CMD_WR   0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with dev_set_drvdata, and keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
        MMC_PACKED_NR_IDX = -1,
        MMC_PACKED_NR_ZERO,
        MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
        struct mmc_packed *packed = mqrq->packed;

        BUG_ON(!packed);

        mqrq->cmd_type = MMC_PACKED_NONE;
        packed->nr_entries = MMC_PACKED_NR_ZERO;
        packed->idx_failure = MMC_PACKED_NR_IDX;
        packed->retries = 0;
        packed->blocks = 0;
}

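/*
 * Take a reference on the mmc_blk_data under open_lock.  Returns NULL if
 * the device is already going away (usage == 0).  Paired with
 * mmc_blk_put(), which tears everything down when the last user is gone.
 */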
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card = md->queue.card;
        int locked = 0;

        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_card *card;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        card = md->queue.card;

        mmc_get_card(card);

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                         card->ext_csd.boot_ro_lock |
                         EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                         card->ext_csd.part_time);
        if (ret)
                pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                       md->disk->disk_name, ret);
        else
                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

        mmc_put_card(card);

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n",
                                        part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }

        mmc_blk_put(md);
        return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);

        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

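/*
 * Kernel-side copy of a user space mmc_ioc_cmd, plus a bounce buffer for
 * the data phase of the command (if any).
 */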
struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes)
                return idata;

        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                           idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}

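/*
 * Poll CMD13 (SEND_STATUS) until the card reports no error bits and has
 * left the programming state, or retries_max attempts have been made.
 * Used to make sure an RPMB access has really completed.
 */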
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
                                       u32 retries_max)
{
        int err;
        u32 retry_count = 0;

        if (!status || !retries_max)
                return -EINVAL;

        do {
                err = get_card_status(card, status, 5);
                if (err)
                        break;

                if (!R1_STATUS(*status) &&
                    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
                        break; /* RPMB programming operation complete */

                /*
                 * Reschedule to give the MMC device a chance to continue
                 * processing the previous command without being polled too
                 * frequently.
                 */
                usleep_range(1000, 5000);
        } while (++retry_count < retries_max);

        if (retry_count == retries_max)
                err = -EPERM;

        return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
        int err;

        if (!mmc_can_sanitize(card)) {
                pr_warn("%s: %s - SANITIZE is not supported\n",
                        mmc_hostname(card->host), __func__);
                err = -EOPNOTSUPP;
                goto out;
        }

        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
                 mmc_hostname(card->host), __func__);

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_SANITIZE_START, 1,
                         MMC_SANITIZE_REQ_TIMEOUT);

        if (err)
                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
                       mmc_hostname(card->host), __func__, err);

        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
                 __func__);
out:
        return err;
}

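/*
 * MMC_IOC_CMD handler: builds a single mmc_request from the user-supplied
 * mmc_ioc_cmd, issues it against the whole block device, and copies the
 * response (and any read data) back to user space.
 */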
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                             struct mmc_ioc_cmd __user *ic_ptr)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err;
        int is_rpmb = false;
        u32 status = 0;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR(idata))
                return PTR_ERR(idata);

        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
                goto cmd_err;
        }

        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
                is_rpmb = true;

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        mmc_get_card(card);

        err = mmc_blk_part_switch(card, md);
        if (err)
                goto cmd_rel_host;

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        goto cmd_rel_host;
        }

        if (is_rpmb) {
                err = mmc_set_blockcount(card, data.blocks,
                        idata->ic.write_flag & (1 << 31));
                if (err)
                        goto cmd_rel_host;
        }

        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
            (cmd.opcode == MMC_SWITCH)) {
                err = ioctl_do_sanitize(card);

                if (err)
                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                               __func__, err);

                goto cmd_rel_host;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                        __func__, cmd.error);
                err = cmd.error;
                goto cmd_rel_host;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                        __func__, data.error);
                err = data.error;
                goto cmd_rel_host;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
                err = -EFAULT;
                goto cmd_rel_host;
        }

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
                                 idata->buf, idata->buf_bytes)) {
                        err = -EFAULT;
                        goto cmd_rel_host;
                }
        }

        if (is_rpmb) {
                /*
                 * Ensure RPMB command has completed by polling CMD13
                 * "Send Status".
                 */
                err = ioctl_rpmb_card_status_poll(card, &status, 5);
                if (err)
                        dev_err(mmc_dev(card->host),
                                "%s: Card Status=0x%08X, error %d\n",
                                __func__, status, err);
        }

cmd_rel_host:
        mmc_put_card(card);

cmd_done:
        mmc_blk_put(md);
cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        int ret = -EINVAL;

        if (cmd == MMC_IOC_CMD)
                ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
        return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};

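/*
 * Select the (user/boot/RPMB) hardware partition backing @md by rewriting
 * the access bits of EXT_CSD_PART_CONFIG.  A no-op when the requested
 * partition is already the current one.
 */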
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;

                card->ext_csd.part_config = part_config;
        }

        main_md->part_curr = md->part_type;
        return 0;
}

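/*
 * Ask an SD card how many blocks of the previous write were successfully
 * programmed, using ACMD22 (SEND_NUM_WR_BLKS).  Returns (u32)-1 on any
 * failure.
 */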
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}

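/*
 * Poll the card with CMD13 until it signals READY_FOR_DATA and has left
 * the programming state, until the host's hardware busy signalling takes
 * over (when hw_busy_detect is set), or until timeout_ms expires.
 */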
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
                bool hw_busy_detect, struct request *req, int *gen_err)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;
        u32 status;

        do {
                err = get_card_status(card, &status, 5);
                if (err) {
                        pr_err("%s: error %d requesting status\n",
                               req->rq_disk->disk_name, err);
                        return err;
                }

                if (status & R1_ERROR) {
                        pr_err("%s: %s: error sending status cmd, status %#x\n",
                               req->rq_disk->disk_name, __func__, status);
                        *gen_err = 1;
                }

                /* We may rely on the host hw to handle busy detection. */
                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
                    hw_busy_detect)
                        break;

                /*
                 * Timeout if the device never becomes ready for data and never
                 * leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s %s\n",
                               mmc_hostname(card->host),
                               req->rq_disk->disk_name, __func__);
                        return -ETIMEDOUT;
                }

                /*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
        } while (!(status & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

        return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
                struct request *req, int *gen_err, u32 *stop_status)
{
        struct mmc_host *host = card->host;
        struct mmc_command cmd = {0};
        int err;
        bool use_r1b_resp = rq_data_dir(req) == WRITE;

        /*
         * Normally we use R1B responses for WRITE, but in cases where the host
         * has specified a max_busy_timeout we need to validate it.  A failure
         * means we need to prevent the host from doing hw busy detection, which
         * is done by converting to a R1 response instead.
         */
        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        if (use_r1b_resp) {
                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, 5);
        if (err)
                return err;

        *stop_status = cmd.resp[0];

        /* No need to check card status in case of READ. */
        if (rq_data_dir(req) == READ)
                return 0;

        if (!mmc_host_is_spi(host) &&
            (*stop_status & R1_ERROR)) {
                pr_err("%s: %s: general error sending stop command, resp %#x\n",
                       req->rq_disk->disk_name, __func__, *stop_status);
                *gen_err = 1;
        }

        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)
{
        switch (error) {
        case -EILSEQ:
                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                       req->rq_disk->disk_name, "response CRC error",
                       name, status);
                return ERR_RETRY;

        case -ETIMEDOUT:
                pr_err("%s: %s sending %s command, card status %#x\n",
                       req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */
                if (!status_valid)
                        return ERR_RETRY;

                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
                        return ERR_RETRY;

                /* Otherwise abort the command */
                return ERR_ABORT;

        default:
                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
                return ERR_ABORT;
        }
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
        int err, retry;

        if (mmc_card_removed(card))
                return ERR_NOMEDIUM;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = get_card_status(card, &status, 0);
                if (!err)
                        break;

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
        }

        /* We couldn't get a response from the card.  Give up. */
        if (err) {
                /* Check if the card is removed */
                if (mmc_detect_card_removed(card->host))
                        return ERR_NOMEDIUM;
                return ERR_ABORT;
        }

        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = 1;

        /* Flag General errors */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
                if ((status & R1_ERROR) ||
                    (brq->stop.resp[0] & R1_ERROR)) {
                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0], status);
                        *gen_err = 1;
                }

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card,
                        DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
                        req, gen_err, &stop_status);
                if (err) {
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);
                        /*
                         * If the stop cmd also timed out, the card is probably
                         * not present, so abort.  Other errors are bad news too.
                         */
                        return ERR_ABORT;
                }

                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = 1;
        }

        /* Check for set block count errors */
        if (brq->sbc.error)
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                         prev_cmd_status_valid, status);

        /* Check for r/w command errors */
        if (brq->cmd.error)
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                         prev_cmd_status_valid, status);

        /* Data errors */
        if (!brq->stop.error)
                return ERR_CONTINUE;

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
                req->rq_disk->disk_name, brq->stop.error,
                brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        if (stop_status) {
                brq->stop.resp[0] = stop_status;
                brq->stop.error = 0;
        }
        return ERR_CONTINUE;
}

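/*
 * Reset the card after a fatal error, but at most once per request type
 * (read/write/discard/secdiscard, tracked in md->reset_done), and restore
 * the hardware partition the request was targeting.
 */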
994
Adrian Hunter67716322011-08-29 16:42:15 +0300995static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
996 int type)
997{
998 int err;
999
1000 if (md->reset_done & type)
1001 return -EEXIST;
1002
1003 md->reset_done |= type;
1004 err = mmc_hw_reset(host);
1005 /* Ensure we switch back to the correct partition */
1006 if (err != -EOPNOTSUPP) {
Ulf Hanssonfc95e302014-10-06 14:34:09 +02001007 struct mmc_blk_data *main_md =
1008 dev_get_drvdata(&host->card->dev);
Adrian Hunter67716322011-08-29 16:42:15 +03001009 int part_err;
1010
1011 main_md->part_curr = main_md->part_type;
1012 part_err = mmc_blk_part_switch(host->card, md);
1013 if (part_err) {
1014 /*
1015 * We have failed to get back into the correct
1016 * partition, so we need to abort the whole request.
1017 */
1018 return -ENODEV;
1019 }
1020 }
1021 return err;
1022}
1023
1024static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1025{
1026 md->reset_done &= ~type;
1027}
1028
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_discard(card))
                arg = MMC_DISCARD_ARG;
        else if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_TRIM_ARG ?
                                 INAND_CMD38_ARG_TRIM :
                                 INAND_CMD38_ARG_ERASE,
                                 0);
                if (err)
                        goto out;
        }
        err = mmc_erase(card, from, nr, arg);
out:
        if (err == -EIO && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
        blk_end_request(req, err, blk_rq_bytes(req));

        return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;

        if (!(mmc_can_secure_erase_trim(card))) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out_retry;
        }

        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
        if (err)
                goto out;

        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out_retry;
                }

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
                if (err)
                        goto out;
        }

out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
out:
        blk_end_request(req, err, blk_rq_bytes(req));

        return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        int ret = 0;

        ret = mmc_flush_cache(card);
        if (ret)
                ret = -EIO;

        blk_end_request_all(req, ret);

        return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}

#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */

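/*
 * Classify the outcome of a completed transfer into an MMC_BLK_* status
 * (success, partial, retry, abort, ...) that drives the caller's
 * retry/abort handling.
 */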
static int mmc_blk_err_check(struct mmc_card *card,
                             struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;
        int ecc_err = 0, gen_err = 0;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
            brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
                case ERR_NOMEDIUM:
                        return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
        }

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

        /*
         * Everything else is either success, or a data error of some
         * kind.  If it was a write, we may have transitioned to
         * program mode, and we have to wait for that to complete.
         */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                int err;

                /* Check stop command response */
                if (brq->stop.resp[0] & R1_ERROR) {
                        pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0]);
                        gen_err = 1;
                }

                err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
                                       &gen_err);
                if (err)
                        return MMC_BLK_CMD_ERR;
        }

        /* if general error occurs, retry the write operation. */
        if (gen_err) {
                pr_warn("%s: retrying write for general error\n",
                        req->rq_disk->disk_name);
                return MMC_BLK_RETRY;
        }

        if (brq->data.error) {
                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
                       req->rq_disk->disk_name, brq->data.error,
                       (unsigned)blk_rq_pos(req),
                       (unsigned)blk_rq_sectors(req),
                       brq->cmd.resp[0], brq->stop.resp[0]);

                if (rq_data_dir(req) == READ) {
                        if (ecc_err)
                                return MMC_BLK_ECC_ERR;
                        return MMC_BLK_DATA_ERR;
                } else {
                        return MMC_BLK_CMD_ERR;
                }
        }

        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;

        if (mmc_packed_cmd(mq_mrq->cmd_type)) {
                if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
                        return MMC_BLK_PARTIAL;
                else
                        return MMC_BLK_SUCCESS;
        }

        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;

        return MMC_BLK_SUCCESS;
}

static int mmc_blk_packed_err_check(struct mmc_card *card,
                                    struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
                                                   mmc_active);
        struct request *req = mq_rq->req;
        struct mmc_packed *packed = mq_rq->packed;
        int err, check, status;
        u8 *ext_csd;

        BUG_ON(!packed);

        packed->retries--;
        check = mmc_blk_err_check(card, areq);
        err = get_card_status(card, &status, 0);
        if (err) {
                pr_err("%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
                return MMC_BLK_ABORT;
        }

        if (status & R1_EXCEPTION_EVENT) {
                err = mmc_get_ext_csd(card, &ext_csd);
                if (err) {
                        pr_err("%s: error %d sending ext_csd\n",
                               req->rq_disk->disk_name, err);
                        return MMC_BLK_ABORT;
                }

                if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
                     EXT_CSD_PACKED_FAILURE) &&
                    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
                     EXT_CSD_PACKED_GENERIC_ERROR)) {
                        if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
                            EXT_CSD_PACKED_INDEXED_ERROR) {
                                packed->idx_failure =
                                        ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
                                check = MMC_BLK_PARTIAL;
                        }
                        pr_err("%s: packed cmd failed, nr %u, sectors %u, failure index: %d\n",
                               req->rq_disk->disk_name, packed->nr_entries,
                               packed->blocks, packed->idx_failure);
                }
                kfree(ext_csd);
        }

        return check;
}

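/*
 * Build the mmc_blk_request (optional SET_BLOCK_COUNT, the read/write
 * command, the data transfer and an optional STOP_TRANSMISSION) for a
 * block layer request; disable_multi forces single-block transfers,
 * which is used for read error recovery.
 */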
Per Forlin54d49d72011-07-01 18:55:29 +02001340static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1341 struct mmc_card *card,
1342 int disable_multi,
1343 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
Per Forlin54d49d72011-07-01 18:55:29 +02001345 u32 readcmd, writecmd;
1346 struct mmc_blk_request *brq = &mqrq->brq;
1347 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 struct mmc_blk_data *md = mq->data;
Saugata Das42659002011-12-21 13:09:17 +05301349 bool do_data_tag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001351 /*
1352 * Reliable writes are used to implement Forced Unit Access and
1353 * REQ_META accesses, and are supported only on MMCs.
Christoph Hellwig65299a32011-08-23 14:50:29 +02001354 *
1355 * XXX: this really needs a good explanation of why REQ_META
1356 * is treated special.
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001357 */
1358 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1359 (req->cmd_flags & REQ_META)) &&
1360 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001361 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001362
Per Forlin54d49d72011-07-01 18:55:29 +02001363 memset(brq, 0, sizeof(struct mmc_blk_request));
1364 brq->mrq.cmd = &brq->cmd;
1365 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
Per Forlin54d49d72011-07-01 18:55:29 +02001367 brq->cmd.arg = blk_rq_pos(req);
1368 if (!mmc_card_blockaddr(card))
1369 brq->cmd.arg <<= 9;
1370 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1371 brq->data.blksz = 512;
1372 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1373 brq->stop.arg = 0;
Per Forlin54d49d72011-07-01 18:55:29 +02001374 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
Per Forlin54d49d72011-07-01 18:55:29 +02001376 /*
1377 * The block layer doesn't support all sector count
1378 * restrictions, so we need to be prepared for too big
1379 * requests.
1380 */
1381 if (brq->data.blocks > card->host->max_blk_count)
1382 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383
Paul Walmsley2bf22b32011-10-06 14:50:33 -06001384 if (brq->data.blocks > 1) {
1385 /*
1386 * After a read error, we redo the request one sector
1387 * at a time in order to accurately determine which
1388 * sectors can be read successfully.
1389 */
1390 if (disable_multi)
1391 brq->data.blocks = 1;
1392
Kuninori Morimoto2e47e842014-09-02 19:08:53 -07001393 /*
1394 * Some controllers have HW issues while operating
1395 * in multiple I/O mode
1396 */
1397 if (card->host->ops->multi_io_quirk)
1398 brq->data.blocks = card->host->ops->multi_io_quirk(card,
1399 (rq_data_dir(req) == READ) ?
1400 MMC_DATA_READ : MMC_DATA_WRITE,
1401 brq->data.blocks);
Paul Walmsley2bf22b32011-10-06 14:50:33 -06001402 }
Per Forlin54d49d72011-07-01 18:55:29 +02001403
1404 if (brq->data.blocks > 1 || do_rel_wr) {
1405 /* SPI multiblock writes terminate using a special
1406 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +02001407 */
Per Forlin54d49d72011-07-01 18:55:29 +02001408 if (!mmc_host_is_spi(card->host) ||
1409 rq_data_dir(req) == READ)
1410 brq->mrq.stop = &brq->stop;
1411 readcmd = MMC_READ_MULTIPLE_BLOCK;
1412 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1413 } else {
1414 brq->mrq.stop = NULL;
1415 readcmd = MMC_READ_SINGLE_BLOCK;
1416 writecmd = MMC_WRITE_BLOCK;
1417 }
1418 if (rq_data_dir(req) == READ) {
1419 brq->cmd.opcode = readcmd;
1420 brq->data.flags |= MMC_DATA_READ;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01001421 if (brq->mrq.stop)
1422 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1423 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02001424 } else {
1425 brq->cmd.opcode = writecmd;
1426 brq->data.flags |= MMC_DATA_WRITE;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01001427 if (brq->mrq.stop)
1428 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1429 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02001430 }
Pierre Ossman548d2de2009-04-10 17:52:57 +02001431
Per Forlin54d49d72011-07-01 18:55:29 +02001432 if (do_rel_wr)
1433 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +01001434
Per Forlin54d49d72011-07-01 18:55:29 +02001435 /*
Saugata Das42659002011-12-21 13:09:17 +05301436 * Data tag is used only during writing meta data to speed
1437 * up write and any subsequent read of this meta data
1438 */
1439 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1440 (req->cmd_flags & REQ_META) &&
1441 (rq_data_dir(req) == WRITE) &&
1442 ((brq->data.blocks * brq->data.blksz) >=
1443 card->ext_csd.data_tag_unit_size);
1444
1445 /*
Per Forlin54d49d72011-07-01 18:55:29 +02001446 * Pre-defined multi-block transfers are preferable to
1447 * open ended-ones (and necessary for reliable writes).
1448 * However, it is not sufficient to just send CMD23,
1449 * and avoid the final CMD12, as on an error condition
1450 * CMD12 (stop) needs to be sent anyway. This, coupled
1451 * with Auto-CMD23 enhancements provided by some
1452 * hosts, means that the complexity of dealing
1453 * with this is best left to the host. If CMD23 is
1454 * supported by card and host, we'll fill sbc in and let
1455 * the host deal with handling it correctly. This means
1456 * that for hosts that don't expose MMC_CAP_CMD23, no
1457 * change of behavior will be observed.
1458 *
1459 * N.B: Some MMC cards experience perf degradation.
1460 * We'll avoid using CMD23-bounded multiblock writes for
1461 * these, while retaining features like reliable writes.
1462 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

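	/*
	 * For reference, the CMD23 argument assembled above packs the
	 * whole transfer description into one 32-bit word: bit 31
	 * requests a reliable write, bit 29 requests a data tag, and the
	 * low 16 bits carry the block count (bit 30 is reserved for
	 * packed commands, see MMC_CMD23_ARG_PACKED below). A reliable
	 * write of 8 blocks would therefore use an argument of
	 * 0x80000008.
	 */
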
	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}
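	/*
	 * E.g. if disable_multi shrank an 8-block request to a single
	 * block, the loop above walks the scatterlist until 512 bytes
	 * are covered, trims the last entry to fit and drops the rest.
	 */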

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}

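/*
 * Worked example for the helper above: a large-sector card needs a
 * 4096-byte packed header, so on a (hypothetical) host whose maximum
 * segment size is 1024 bytes the loop runs four times and returns 4;
 * with a typical 64KB segment limit both the 4096- and 512-byte headers
 * fit in a single segment and the helper returns 1.
 */
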
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}

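/*
 * To summarize the builder above: packing is only attempted for writes
 * on hosts and cards that advertise packed write support, and the group
 * stops growing at the first discard/flush request, direction change,
 * reliable write (when reliable writes are not natively enabled),
 * misaligned request on a large-sector card, or when the host's
 * block-count or segment budget would be exceeded.
 */
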
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of the packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[(i * 2) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}

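/*
 * The resulting packed command header built above, as 32-bit words:
 *
 *   word 0:       (nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER
 *   word 2*i:     CMD23 argument of entry i (rel-wr/tag flags | sector count)
 *   word 2*i+1:   CMD18/CMD25 argument of entry i (start address)
 *
 * The header itself occupies one 512-byte block (eight blocks on
 * large-sector cards) and is written ahead of the packed data, which is
 * why hdr_blocks is added to the CMD23 block count.
 */
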
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still complete the written
	 * sectors as reported by the controller (which might be less
	 * than the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}

static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}

static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}

static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}

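/*
 * The three helpers above cover the packed-write completion paths:
 * mmc_blk_end_packed_req() completes every entry before idx_failure and
 * re-arms the remainder for retry, mmc_blk_abort_packed_req() fails all
 * entries with -EIO, and mmc_blk_revert_packed_req() requeues every
 * entry except the original request so they can be reissued
 * individually.
 */
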
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When the 4KB native sector size is enabled,
			 * only multiples of 8 blocks may be read or
			 * written.
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
			    mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {

				/*
				 * In case of an incomplete request,
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						   disable_multi, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			}
		}
	} while (ret);

	return 1;

 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to
			 * be put back first.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}

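/*
 * In brief, the per-status handling in the loop above is:
 *
 *   MMC_BLK_SUCCESS/PARTIAL  complete the transferred bytes and go on
 *   MMC_BLK_CMD_ERR          complete the known-good sectors, then
 *                            reset the card or abort
 *   MMC_BLK_RETRY            retry up to 5 times, then treat as abort
 *   MMC_BLK_ABORT            reset the card or abort
 *   MMC_BLK_DATA_ERR         reset; abort if the card is gone or the
 *                            request was packed
 *   MMC_BLK_ECC_ERR          fall back to single-block reads, else
 *                            fail the offending sector with -EIO
 *   MMC_BLK_NOMEDIUM         abort
 */
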
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	    (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release the host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In the case of a special request there is no reentry
		 * into 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
	return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating the main mmc_blk_data that
	 * will be associated with the mmc_card via dev_set_drvdata. Due
	 * to device partitions, devidx will not coincide with a
	 * per-physical-card index anymore, so we keep track of a name
	 * index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major = MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE. Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in units of 512-byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
			       MMC_BLK_DATA_AREA_MAIN);
	return md;
}

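/*
 * Example of the CSD conversion above: a standard-capacity card
 * reporting csd.capacity = 4096 with read_blkbits = 11 (2048-byte
 * blocks) yields 4096 << (11 - 9) = 16384 sectors of 512 bytes, i.e.
 * an 8 MiB device.
 */
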
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}

/* MMC physical partitions consist of two boot partitions and
 * up to four general-purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		/*
		 * Flush remaining requests and free queues. It
		 * is freeing the queue that stops new requests
		 * from being accepted.
		 */
		card = md->queue.card;
		mmc_cleanup_queue(&md->queue);
		if (md->flags & MMC_BLK_PACKED_CMD)
			mmc_packed_clean(&md->queue);
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}

#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15

static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	END_FIXUP
};

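/*
 * Illustrative sketch: a new quirk would be added as one more entry in
 * blk_fixups[] before END_FIXUP. The product name "XYZ123" and the
 * manfid 0xFE below are invented for the example.
 */
#if 0
	MMC_FIXUP("XYZ123", 0xFE, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
#endif
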
static int mmc_blk_probe(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}

static int mmc_blk_remove(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(dev, NULL);

	return 0;
}

static int _mmc_blk_suspend(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct device *dev)
{
	_mmc_blk_suspend(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	return _mmc_blk_suspend(dev);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct device_driver mmc_driver = {
	.name		= "mmcblk",
	.pm		= &mmc_blk_pm_ops,
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

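/*
 * As the comment at the top of the file notes, the defaults can be
 * overridden by module parameters; with MODULE_PARAM_PREFIX set to
 * "mmcblk." (and assuming perdev_minors is exposed as a parameter),
 * the per-device minor count could be changed with e.g.
 * "mmcblk.perdev_minors=16" on the kernel command line.
 */
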
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
