/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
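
/*
 * With this prefix, the parameters declared below are exposed as
 * "mmcblk.<name>"; e.g. a built-in driver would take the boot argument
 * "mmcblk.perdev_minors=16" (the value 16 is just an illustration; the
 * default comes from CONFIG_MMC_BLOCK_MINORS).
 */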

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER		0x01
#define PACKED_CMD_WR		0x02
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)		\
	do {							\
		if (stats->enabled)				\
			stats->pack_stop_reason[reason]++;	\
	} while (0)

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute num_wr_reqs_to_start_packing;
};

static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
};
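
/*
 * Rough meaning of the statuses above, as used by the error handling
 * later in this file: SUCCESS means all requested bytes transferred;
 * PARTIAL means only part of the request completed and the remainder
 * should be reissued; RETRY and ABORT come out of command error
 * recovery; DATA_ERR is a generic data-phase failure, while ECC_ERR
 * marks card ECC failures, which are retried as single-block reads.
 */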

enum {
	MMC_PACKED_N_IDX = -1,
	MMC_PACKED_N_ZERO,
	MMC_PACKED_N_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	mqrq->packed_cmd = MMC_PACKED_NONE;
	mqrq->packed_num = MMC_PACKED_N_ZERO;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
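
/*
 * Lifetime note: md->usage is a reference count protected by open_lock.
 * mmc_blk_get() refuses to hand out a reference once the count has
 * dropped to zero, and mmc_blk_put() below frees the mmc_blk_data (and
 * releases its device index) on the final put.
 */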

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
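
/*
 * A minimal usage sketch for the attribute above (assuming the first
 * card shows up as mmcblk0):
 *
 *	# force the disk read-only, regardless of the card's own setting
 *	echo 1 > /sys/block/mmcblk0/force_ro
 *	# drop the override; a hardware read-only switch still applies
 *	echo 0 > /sys/block/mmcblk0/force_ro
 */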

static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int num_wr_reqs_to_start_packing;
	int ret;

	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);

	mmc_blk_put(md);
	return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	sscanf(buf, "%d", &value);
	if (value >= 0)
		md->queue.num_wr_reqs_to_start_packing = value;

	mmc_blk_put(md);
	return count;
}
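
/*
 * Usage sketch, same assumptions as for force_ro above:
 *
 *	# require more than 32 consecutive writes before packing starts
 *	echo 32 > /sys/block/mmcblk0/num_wr_reqs_to_start_packing
 *
 * The threshold is consumed by mmc_blk_write_packing_control() below.
 */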

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
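
/*
 * The geometry reported above is synthetic: flash cards have no real
 * CHS layout, so we advertise 4 heads x 16 sectors and derive the
 * cylinder count from the capacity (64 sectors per cylinder), which is
 * enough to keep legacy partitioning tools happy.
 */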

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = idata->ic.blksz;
	data.blocks = idata->ic.blocks;

	sg_init_one(data.sg, idata->buf, idata->buf_bytes);

	if (idata->ic.write_flag)
		data.flags = MMC_DATA_WRITE;
	else
		data.flags = MMC_DATA_READ;

	mrq.cmd = &cmd;
	mrq.data = &data;

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	/* data.flags must already be set before doing this. */
	mmc_set_data_timeout(&data, card);
	/* Allow overriding the timeout_ns for empirical tuning. */
	if (idata->ic.data_timeout_ns)
		data.timeout_ns = idata->ic.data_timeout_ns;

	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
		/*
		 * Pretend this is a data transfer and rely on the host driver
		 * to compute timeout.  When all host drivers support
		 * cmd.cmd_timeout for R1B, this can be changed to:
		 *
		 *     mrq.data = NULL;
		 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
		 */
		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
	kfree(idata->buf);
	kfree(idata);
	return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

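/*
 * A minimal userspace sketch of driving MMC_IOC_CMD, for illustration
 * only: error handling is omitted, and the EXT_CSD read via CMD8
 * assumes an eMMC card plus response/command flag values matching the
 * core's MMC_RSP_R1/MMC_CMD_ADTC encoding (userspace tools typically
 * define those values themselves):
 *
 *	#include <linux/mmc/ioctl.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *
 *	uint8_t ext_csd[512];
 *	struct mmc_ioc_cmd ic = {0};
 *	// must be the whole device, and the caller needs CAP_SYS_RAWIO
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *	ic.opcode = 8;			// MMC_SEND_EXT_CSD
 *	ic.arg = 0;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	ic.write_flag = 0;		// read from the card
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *	ioctl(fd, MMC_IOC_CMD, &ic);	// ext_csd[] holds the register on success
 */
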
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}

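/*
 * mmc_sd_num_wr_blocks() issues ACMD22 (SEND_NUM_WR_BLOCKS) to ask an SD
 * card how many blocks of the last write completed without error;
 * mmc_blk_cmd_err() uses the answer to acknowledge the known-good part
 * of a failed write.  (u32)-1 doubles as the "don't know" return value.
 */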
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
			return ERR_RETRY;
		}
		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
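
/*
 * reset_done is a bitmask over the MMC_BLK_{READ,WRITE,DISCARD,SECDISCARD}
 * request types: mmc_blk_reset() refuses (-EEXIST) a second reset for the
 * same type until mmc_blk_reset_success() clears the bit again, so each
 * request type gets at most one hardware reset per failure before the
 * request is failed for good.
 */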

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq,
				     struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int err = 0;

	BUG_ON(!card);
	BUG_ON(!card->host);

	if (!(mmc_can_sanitize(card) &&
	      (card->host->caps2 & MMC_CAP2_SANITIZE))) {
		pr_warning("%s: %s - SANITIZE is not supported\n",
			   mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - mmc_switch() with "
		       "EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);

out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&md->lock);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
		case ERR_NOMEDIUM:
			return MMC_BLK_ABORT;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				printk(KERN_ERR "%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	int err, check;
	u32 status;
	u8 ext_csd[512];

	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXP_EVENT) {
		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			return MMC_BLK_ABORT;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				mq_rq->packed_fail_idx =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				return MMC_BLK_PARTIAL;
			}
		}
	}

	return check;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway.  This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host.  If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly.  This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */

	if ((md->flags & MMC_BLK_CMD23) &&
	    mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static void mmc_blk_write_packing_control(struct mmc_queue *mq,
					  struct request *req)
{
	struct mmc_host *host = mq->card->host;
	int data_dir;

	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
		return;

	/*
	 * If packing control is not supported by the host, it must not
	 * affect write packing, so enable packing unconditionally.
	 */
	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
		mq->wr_packing_enabled = true;
		return;
	}

	if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
		if (mq->num_of_potential_packed_wr_reqs >
				mq->num_wr_reqs_to_start_packing)
			mq->wr_packing_enabled = true;
		return;
	}

	data_dir = rq_data_dir(req);

	if (data_dir == READ) {
		mq->num_of_potential_packed_wr_reqs = 0;
		mq->wr_packing_enabled = false;
		return;
	} else if (data_dir == WRITE) {
		mq->num_of_potential_packed_wr_reqs++;
	}

	if (mq->num_of_potential_packed_wr_reqs >
			mq->num_wr_reqs_to_start_packing)
		mq->wr_packing_enabled = true;
}
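
/*
 * In short: packing stays off until more than num_wr_reqs_to_start_packing
 * consecutive write requests have been observed; any read resets the
 * counter and disables packing again, so the heuristic only triggers on
 * sustained write-mostly streams.
 */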

struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
{
	if (!card)
		return NULL;

	return &card->wr_pack_stats;
}
EXPORT_SYMBOL(mmc_blk_get_packed_statistics);

void mmc_blk_init_packed_statistics(struct mmc_card *card)
{
	int max_num_of_packed_reqs = 0;

	if (!card || !card->wr_pack_stats.packing_events)
		return;

	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;

	spin_lock(&card->wr_pack_stats.lock);
	memset(card->wr_pack_stats.packing_events, 0,
	       (max_num_of_packed_reqs + 1) *
	       sizeof(*card->wr_pack_stats.packing_events));
	memset(&card->wr_pack_stats.pack_stop_reason, 0,
	       sizeof(card->wr_pack_stats.pack_stop_reason));
	card->wr_pack_stats.enabled = true;
	spin_unlock(&card->wr_pack_stats.lock);
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);

static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	u8 put_back = 0;
	u8 max_packed_rw = 0;
	u8 reqs = 0;
	struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;

	mmc_blk_clear_packed(mq->mqrq_cur);

	if (!(md->flags & MMC_BLK_CMD23) ||
	    !card->ext_csd.packed_event_en)
		goto no_packed;

	if (!mq->wr_packing_enabled)
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    (card->host->caps2 & MMC_CAP2_PACKED_WR))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) &&
	    !en_rel_wr) {
		goto no_packed;
	}

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors++;
		phys_segments++;
	}

	spin_lock(&stats->lock);

	while (reqs < max_packed_rw - 1) {
		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
			break;
		}

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH) {
			MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
			put_back = 1;
			break;
		}

		if (rq_data_dir(cur) != rq_data_dir(next)) {
			MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
			put_back = 1;
			break;
		}

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) &&
		    !en_rel_wr) {
			MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
			put_back = 1;
			break;
		}

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count) {
			if (stats->enabled)
				stats->pack_stop_reason[EXCEEDS_SECTORS]++;
			put_back = 1;
			break;
		}

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs) {
			MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
			put_back = 1;
			break;
		}

		if (rq_data_dir(next) == WRITE)
			mq->num_of_potential_packed_wr_reqs++;
		list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
		cur = next;
		reqs++;
	}

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (stats->enabled) {
		if (reqs + 1 <= card->ext_csd.max_packed_writes)
			stats->packing_events[reqs + 1]++;
		if (reqs + 1 == max_packed_rw)
			MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
	}

	spin_unlock(&stats->lock);

	if (reqs > 0) {
		list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
		mq->mqrq_cur->packed_num = ++reqs;
		return reqs;
	}

no_packed:
	mmc_blk_clear_packed(mq->mqrq_cur);
	return 0;
}

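/*
 * Summary of the conditions that stop list preparation above, mirroring
 * the pack_stop_reason counters: the queue ran empty, a flush or discard
 * arrived, the data direction changed, a reliable write had to stand
 * alone, the group would exceed the host's sector or segment limits, or
 * it reached the card's max_packed_writes threshold.
 */
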
1478static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1479 struct mmc_card *card,
1480 struct mmc_queue *mq)
1481{
1482 struct mmc_blk_request *brq = &mqrq->brq;
1483 struct request *req = mqrq->req;
1484 struct request *prq;
1485 struct mmc_blk_data *md = mq->data;
1486 bool do_rel_wr;
1487 u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
1488 u8 i = 1;
1489
1490 mqrq->packed_cmd = MMC_PACKED_WRITE;
1491 mqrq->packed_blocks = 0;
1492 mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
1493
1494 memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
1495 packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
1496 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1497
1498 /*
1499 * Argument for each entry of packed group
1500 */
1501 list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
1502 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1503 /* Argument of CMD23*/
1504 packed_cmd_hdr[(i * 2)] =
1505 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1506 blk_rq_sectors(prq);
1507 /* Argument of CMD18 or CMD25 */
1508 packed_cmd_hdr[((i * 2)) + 1] =
1509 mmc_card_blockaddr(card) ?
1510 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1511 mqrq->packed_blocks += blk_rq_sectors(prq);
1512 i++;
1513 }
1514
1515 memset(brq, 0, sizeof(struct mmc_blk_request));
1516 brq->mrq.cmd = &brq->cmd;
1517 brq->mrq.data = &brq->data;
1518 brq->mrq.sbc = &brq->sbc;
1519 brq->mrq.stop = &brq->stop;
1520
1521 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1522 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
1523 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1524
1525 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1526 brq->cmd.arg = blk_rq_pos(req);
1527 if (!mmc_card_blockaddr(card))
1528 brq->cmd.arg <<= 9;
1529 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1530
1531 brq->data.blksz = 512;
1532 brq->data.blocks = mqrq->packed_blocks + 1;
1533 brq->data.flags |= MMC_DATA_WRITE;
1534
1535 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1536 brq->stop.arg = 0;
1537 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1538
1539 mmc_set_data_timeout(&brq->data, card);
1540
1541 brq->data.sg = mqrq->sg;
1542 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1543
1544 mqrq->mmc_active.mrq = &brq->mrq;
1545 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1546
1547 mmc_queue_bounce_pre(mqrq);
1548}
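
/*
 * Illustrative example, with hypothetical values not taken from the
 * source: packing two 8-sector writes at LBAs 0x1000 and 0x2000 on a
 * block-addressed card (no reliable-write flags) produces
 *
 *	packed_cmd_hdr[0] = (2 << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
 *	packed_cmd_hdr[2] = 8;		packed_cmd_hdr[3] = 0x1000;
 *	packed_cmd_hdr[4] = 8;		packed_cmd_hdr[5] = 0x2000;
 *
 * packed_blocks is then 16, so CMD23 carries MMC_CMD23_ARG_PACKED | 17
 * and brq->data.blocks is 17: the header block plus sixteen data blocks.
 */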
1549
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001550static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1551 struct mmc_blk_request *brq, struct request *req,
1552 int ret)
1553{
Seungwon Jeon968c7742012-05-31 11:54:47 +03001554 struct mmc_queue_req *mq_rq;
1555 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1556
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001557 /*
1558 * If this is an SD card and we're writing, we can first
1559 * mark the known good sectors as ok.
1560 *
 1561	 * If the card is not SD, we can still complete the written
 1562	 * sectors reported by the controller (which might be fewer
 1563	 * than the real number of written sectors, but never more).
1564 */
1565 if (mmc_card_sd(card)) {
1566 u32 blocks;
1567
1568 blocks = mmc_sd_num_wr_blocks(card);
1569 if (blocks != (u32)-1) {
1570 spin_lock_irq(&md->lock);
1571 ret = __blk_end_request(req, 0, blocks << 9);
1572 spin_unlock_irq(&md->lock);
1573 }
1574 } else {
Seungwon Jeon968c7742012-05-31 11:54:47 +03001575 if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
1576 spin_lock_irq(&md->lock);
1577 ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
1578 spin_unlock_irq(&md->lock);
1579 }
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001580 }
1581 return ret;
1582}
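
/*
 * Note: mmc_sd_num_wr_blocks() issues ACMD22 (SEND_NUM_WR_BLOCKS), so on
 * SD the card itself reports how many sectors really hit the medium;
 * other cards fall back to the controller's bytes_xfered, which may
 * undercount but never overcounts, as the comment above explains.
 */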
1583
Seungwon Jeon968c7742012-05-31 11:54:47 +03001584static int mmc_blk_end_packed_req(struct mmc_queue *mq,
1585 struct mmc_queue_req *mq_rq)
1586{
1587 struct mmc_blk_data *md = mq->data;
1588 struct request *prq;
1589 int idx = mq_rq->packed_fail_idx, i = 0;
1590 int ret = 0;
1591
1592 while (!list_empty(&mq_rq->packed_list)) {
1593 prq = list_entry_rq(mq_rq->packed_list.next);
1594 if (idx == i) {
1595 /* retry from error index */
1596 mq_rq->packed_num -= idx;
1597 mq_rq->req = prq;
1598 ret = 1;
1599
1600 if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
1601 list_del_init(&prq->queuelist);
1602 mmc_blk_clear_packed(mq_rq);
1603 }
1604 return ret;
1605 }
1606 list_del_init(&prq->queuelist);
1607 spin_lock_irq(&md->lock);
1608 __blk_end_request(prq, 0, blk_rq_bytes(prq));
1609 spin_unlock_irq(&md->lock);
1610 i++;
1611 }
1612
1613 mmc_blk_clear_packed(mq_rq);
1614 return ret;
1615}
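
/*
 * Worked example of the retry semantics above: with packed_fail_idx == 2
 * in a five-entry pack, entries 0 and 1 are completed with
 * __blk_end_request(), mq_rq->req is pointed at entry 2, packed_num
 * drops from 5 to 3 and the function returns 1 so the caller reissues
 * the pack from the failed entry onwards.
 */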
1616
Per Forlin91fd00b2011-07-01 18:55:33 +02001617static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlina69554e42011-07-01 18:55:29 +02001618{
1619 struct mmc_blk_data *md = mq->data;
1620 struct mmc_card *card = md->queue.card;
1621 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001622 int ret = 1, disable_multi = 0, retry = 0, type;
Per Forlind737c892011-07-01 18:55:30 +02001623 enum mmc_blk_status status;
Per Forlin91fd00b2011-07-01 18:55:33 +02001624 struct mmc_queue_req *mq_rq;
Seungwon Jeon968c7742012-05-31 11:54:47 +03001625 struct request *req, *prq;
Per Forlin91fd00b2011-07-01 18:55:33 +02001626 struct mmc_async_req *areq;
Seungwon Jeon968c7742012-05-31 11:54:47 +03001627 const u8 packed_num = 2;
1628 u8 reqs = 0;
Per Forlin91fd00b2011-07-01 18:55:33 +02001629
1630 if (!rqc && !mq->mqrq_prev->req)
1631 return 0;
Per Forlina69554e42011-07-01 18:55:29 +02001632
Seungwon Jeon968c7742012-05-31 11:54:47 +03001633 if (rqc)
1634 reqs = mmc_blk_prep_packed_list(mq, rqc);
1635
Per Forlina69554e42011-07-01 18:55:29 +02001636 do {
Per Forlin91fd00b2011-07-01 18:55:33 +02001637 if (rqc) {
Seungwon Jeon968c7742012-05-31 11:54:47 +03001638 if (reqs >= packed_num)
1639 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1640 card, mq);
1641 else
1642 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlin91fd00b2011-07-01 18:55:33 +02001643 areq = &mq->mqrq_cur->mmc_active;
1644 } else
1645 areq = NULL;
1646 areq = mmc_start_req(card->host, areq, (int *) &status);
1647 if (!areq)
1648 return 0;
Pierre Ossman98ccf142007-05-12 00:26:16 +02001649
Per Forlin91fd00b2011-07-01 18:55:33 +02001650 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1651 brq = &mq_rq->brq;
1652 req = mq_rq->req;
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001653 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlin91fd00b2011-07-01 18:55:33 +02001654 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02001655
Jaehoon Chungf886c802012-05-28 10:33:35 +03001656 /*
1657 * Check BKOPS urgency from each R1 response
1658 */
1659 if (mmc_card_mmc(card) &&
1660 (brq->cmd.resp[0] & R1_EXCEPTION_EVENT))
1661 mmc_card_set_check_bkops(card);
1662
Per Forlind737c892011-07-01 18:55:30 +02001663 switch (status) {
1664 case MMC_BLK_SUCCESS:
1665 case MMC_BLK_PARTIAL:
1666 /*
1667 * A block was successfully transferred.
1668 */
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001669 mmc_blk_reset_success(md, type);
Seungwon Jeon968c7742012-05-31 11:54:47 +03001670
1671 if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
1672 ret = mmc_blk_end_packed_req(mq, mq_rq);
1673 break;
1674 } else {
1675 spin_lock_irq(&md->lock);
1676 ret = __blk_end_request(req, 0,
Per Forlind737c892011-07-01 18:55:30 +02001677 brq->data.bytes_xfered);
Seungwon Jeon968c7742012-05-31 11:54:47 +03001678 spin_unlock_irq(&md->lock);
1679 }
1680
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001681 /*
1682 * If the blk_end_request function returns non-zero even
1683 * though all data has been transferred and no errors
1684 * were returned by the host controller, it's a bug.
1685 */
Per Forlin91fd00b2011-07-01 18:55:33 +02001686 if (status == MMC_BLK_SUCCESS && ret) {
Per Forlin91fd00b2011-07-01 18:55:33 +02001687 printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
1688 __func__, blk_rq_bytes(req),
1689 brq->data.bytes_xfered);
1690 rqc = NULL;
1691 goto cmd_abort;
1692 }
Per Forlind737c892011-07-01 18:55:30 +02001693 break;
1694 case MMC_BLK_CMD_ERR:
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001695 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1696 if (!mmc_blk_reset(md, card->host, type))
1697 break;
1698 goto cmd_abort;
Per Forlind737c892011-07-01 18:55:30 +02001699 case MMC_BLK_RETRY:
1700 if (retry++ < 5)
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +01001701 break;
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001702 /* Fall through */
Per Forlind737c892011-07-01 18:55:30 +02001703 case MMC_BLK_ABORT:
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001704 if (!mmc_blk_reset(md, card->host, type))
1705 break;
Russell King - ARM Linux20803902011-06-20 20:10:49 +01001706 goto cmd_abort;
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001707 case MMC_BLK_DATA_ERR: {
1708 int err;
1709
1710 err = mmc_blk_reset(md, card->host, type);
1711 if (!err)
1712 break;
Seungwon Jeon968c7742012-05-31 11:54:47 +03001713 if (err == -ENODEV ||
1714 mq_rq->packed_cmd != MMC_PACKED_NONE)
Adrian Hunter9ac56f32011-08-29 16:42:15 +03001715 goto cmd_abort;
1716 /* Fall through */
1717 }
1718 case MMC_BLK_ECC_ERR:
1719 if (brq->data.blocks > 1) {
1720 /* Redo read one sector at a time */
1721 pr_warning("%s: retrying using single block read\n",
1722 req->rq_disk->disk_name);
1723 disable_multi = 1;
1724 break;
1725 }
Per Forlind737c892011-07-01 18:55:30 +02001726 /*
1727 * After an error, we redo I/O one sector at a
1728 * time, so we only reach here after trying to
1729 * read a single sector.
1730 */
1731 spin_lock_irq(&md->lock);
1732 ret = __blk_end_request(req, -EIO,
1733 brq->data.blksz);
1734 spin_unlock_irq(&md->lock);
Per Forlin91fd00b2011-07-01 18:55:33 +02001735 if (!ret)
1736 goto start_new_req;
Per Forlind737c892011-07-01 18:55:30 +02001737 break;
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001738 }
1739
Per Forlin91fd00b2011-07-01 18:55:33 +02001740 if (ret) {
Seungwon Jeon968c7742012-05-31 11:54:47 +03001741			/*
 1742			 * In case of an incomplete request,
 1743			 * prepare it again and resend.
 1744			 */
 1745			if (mq_rq->packed_cmd == MMC_PACKED_NONE)
 1746				mmc_blk_rw_rq_prep(mq_rq, card,
 1747						disable_multi, mq);
 1748			else
 1749				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
 1750			mmc_start_req(card->host,
 1751					&mq_rq->mmc_active, NULL);
Per Forlin91fd00b2011-07-01 18:55:33 +02001755 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 } while (ret);
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 return 1;
1759
Russell King - ARM Linux91d56b52011-06-20 20:10:28 +01001760 cmd_abort:
Seungwon Jeon968c7742012-05-31 11:54:47 +03001761 if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
1762 spin_lock_irq(&md->lock);
1763 if (mmc_card_removed(card))
1764 req->cmd_flags |= REQ_QUIET;
1765 while (ret)
1766 ret = __blk_end_request(req, -EIO,
1767 blk_rq_cur_bytes(req));
1768 spin_unlock_irq(&md->lock);
1769 } else {
1770 while (!list_empty(&mq_rq->packed_list)) {
1771 prq = list_entry_rq(mq_rq->packed_list.next);
1772 list_del_init(&prq->queuelist);
1773 spin_lock_irq(&md->lock);
1774 __blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1775 spin_unlock_irq(&md->lock);
1776 }
1777 mmc_blk_clear_packed(mq_rq);
1778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Per Forlin91fd00b2011-07-01 18:55:33 +02001780 start_new_req:
1781 if (rqc) {
Seungwon Jeon968c7742012-05-31 11:54:47 +03001782 /*
 1783		 * If the current request is packed, it needs to be put back.
1784 */
1785 if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
1786 while (!list_empty(&mq->mqrq_cur->packed_list)) {
1787 prq = list_entry_rq(
1788 mq->mqrq_cur->packed_list.prev);
1789 if (prq->queuelist.prev !=
1790 &mq->mqrq_cur->packed_list) {
1791 list_del_init(&prq->queuelist);
1792 spin_lock_irq(mq->queue->queue_lock);
1793 blk_requeue_request(mq->queue, prq);
1794 spin_unlock_irq(mq->queue->queue_lock);
1795 } else {
1796 list_del_init(&prq->queuelist);
1797 }
1798 }
1799 mmc_blk_clear_packed(mq->mqrq_cur);
1800 }
Per Forlin91fd00b2011-07-01 18:55:33 +02001801 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1802 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1803 }
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 return 0;
1806}
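
/*
 * The dispatch order above matters: sanitize, discard and flush must not
 * overlap an in-flight asynchronous transfer, so each of those paths
 * first drains it with mmc_blk_issue_rw_rq(mq, NULL).  The host is
 * claimed on the first request and released only once the queue runs
 * dry, letting back-to-back requests share a single claim.
 */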
1807
San Mehatc87f8d42009-07-30 08:21:19 -07001808static int
1809mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
1810
Adrian Hunterbd788c92010-08-11 14:17:47 -07001811static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1812{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001813 int ret;
1814 struct mmc_blk_data *md = mq->data;
1815 struct mmc_card *card = md->queue.card;
1816
San Mehatc87f8d42009-07-30 08:21:19 -07001817#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1818 if (mmc_bus_needs_resume(card->host)) {
1819 mmc_resume_bus(card->host);
1820 mmc_blk_set_blksize(md, card);
1821 }
1822#endif
1823
Per Forlin91fd00b2011-07-01 18:55:33 +02001824 if (req && !mq->mqrq_prev->req)
1825 /* claim host only for the first request */
1826 mmc_claim_host(card->host);
1827
Andrei Warkentin371a6892011-04-11 18:10:25 -05001828 ret = mmc_blk_part_switch(card, md);
1829 if (ret) {
Adrian Hunter44956712011-09-23 12:48:20 +03001830 if (req) {
1831 spin_lock_irq(&md->lock);
1832 __blk_end_request_all(req, -EIO);
1833 spin_unlock_irq(&md->lock);
1834 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05001835 ret = 0;
1836 goto out;
1837 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001838
Maya Erez63c61d62012-05-31 21:00:18 +03001839 mmc_blk_write_packing_control(mq, req);
1840
Maya Erez463bb952012-05-24 23:46:29 +03001841 if (req && req->cmd_flags & REQ_SANITIZE) {
1842 /* complete ongoing async transfer before issuing sanitize */
1843 if (card->host && card->host->areq)
1844 mmc_blk_issue_rw_rq(mq, NULL);
1845 ret = mmc_blk_issue_sanitize_rq(mq, req);
1846 } else if (req && req->cmd_flags & REQ_DISCARD) {
Per Forlin91fd00b2011-07-01 18:55:33 +02001847 /* complete ongoing async transfer before issuing discard */
1848 if (card->host->areq)
1849 mmc_blk_issue_rw_rq(mq, NULL);
Adrian Hunter49804542010-08-11 14:17:50 -07001850 if (req->cmd_flags & REQ_SECURE)
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001851 ret = mmc_blk_issue_secdiscard_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001852 else
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001853 ret = mmc_blk_issue_discard_rq(mq, req);
Per Forlin91fd00b2011-07-01 18:55:33 +02001854 } else if (req && req->cmd_flags & REQ_FLUSH) {
Jaehoon Chungf011eff2011-07-13 17:02:16 +09001855 /* complete ongoing async transfer before issuing flush */
1856 if (card->host->areq)
1857 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001858 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001859 } else {
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001860 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07001861 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001862
Andrei Warkentin371a6892011-04-11 18:10:25 -05001863out:
Per Forlin91fd00b2011-07-01 18:55:33 +02001864 if (!req)
1865 /* release host only when there are no more requests */
1866 mmc_release_host(card->host);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05001867 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001868}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869
Russell Kinga6f6c962006-01-03 22:38:44 +00001870static inline int mmc_blk_readonly(struct mmc_card *card)
1871{
1872 return mmc_card_readonly(card) ||
1873 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1874}
1875
Andrei Warkentin371a6892011-04-11 18:10:25 -05001876static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1877 struct device *parent,
1878 sector_t size,
1879 bool default_ro,
1880 const char *subname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881{
1882 struct mmc_blk_data *md;
1883 int devidx, ret;
1884
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001885 devidx = find_first_zero_bit(dev_use, max_devices);
1886 if (devidx >= max_devices)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 return ERR_PTR(-ENOSPC);
1888 __set_bit(devidx, dev_use);
1889
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07001890 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00001891 if (!md) {
1892 ret = -ENOMEM;
1893 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 }
Russell Kinga6f6c962006-01-03 22:38:44 +00001895
Russell Kinga6f6c962006-01-03 22:38:44 +00001896 /*
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001897	 * !subname implies we are creating the main mmc_blk_data that will be
1898 * associated with mmc_card with mmc_set_drvdata. Due to device
1899 * partitions, devidx will not coincide with a per-physical card
1900 * index anymore so we keep track of a name index.
1901 */
1902 if (!subname) {
1903 md->name_idx = find_first_zero_bit(name_use, max_devices);
1904 __set_bit(md->name_idx, name_use);
 1905	} else
1907 md->name_idx = ((struct mmc_blk_data *)
1908 dev_to_disk(parent)->private_data)->name_idx;
1909
1910 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00001911 * Set the read-only status based on the supported commands
1912 * and the write protect switch.
1913 */
1914 md->read_only = mmc_blk_readonly(card);
1915
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001916 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00001917 if (md->disk == NULL) {
1918 ret = -ENOMEM;
1919 goto err_kfree;
1920 }
1921
1922 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001923 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00001924 md->usage = 1;
1925
Adrian Hunterd09408a2011-06-23 13:40:28 +03001926 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
Russell Kinga6f6c962006-01-03 22:38:44 +00001927 if (ret)
1928 goto err_putdisk;
1929
Russell Kinga6f6c962006-01-03 22:38:44 +00001930 md->queue.issue_fn = mmc_blk_issue_rq;
1931 md->queue.data = md;
1932
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02001933 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04001934 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00001935 md->disk->fops = &mmc_bdops;
1936 md->disk->private_data = md;
1937 md->disk->queue = md->queue.queue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938 md->disk->driverfs_dev = &card->dev;
Andrei Warkentin371a6892011-04-11 18:10:25 -05001939 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Crossfa746fa2010-09-03 12:41:21 -07001940 md->disk->flags = GENHD_FL_EXT_DEVT;
Russell Kinga6f6c962006-01-03 22:38:44 +00001941
1942 /*
1943 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1944 *
1945 * - be set for removable media with permanent block devices
1946 * - be unset for removable block devices with permanent media
1947 *
1948 * Since MMC block devices clearly fall under the second
1949 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1950 * should use the block device creation/destruction hotplug
1951 * messages to tell when the card is present.
1952 */
1953
Andrei Warkentinf06c9152011-04-21 22:46:13 -05001954 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1955 "mmcblk%d%s", md->name_idx, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00001956
Martin K. Petersene1defc42009-05-22 17:17:49 -04001957 blk_queue_logical_block_size(md->queue.queue, 512);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001958 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001959
Andrei Warkentinf0d89972011-05-23 15:06:38 -05001960 if (mmc_host_cmd23(card->host)) {
1961 if (mmc_card_mmc(card) ||
1962 (mmc_card_sd(card) &&
1963 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1964 md->flags |= MMC_BLK_CMD23;
1965 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001966
1967 if (mmc_card_mmc(card) &&
1968 md->flags & MMC_BLK_CMD23 &&
1969 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1970 card->ext_csd.rel_sectors)) {
1971 md->flags |= MMC_BLK_REL_WR;
1972 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1973 }
1974
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00001976
1977 err_putdisk:
1978 put_disk(md->disk);
1979 err_kfree:
1980 kfree(md);
1981 out:
1982 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983}
1984
Andrei Warkentin371a6892011-04-11 18:10:25 -05001985static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1986{
1987 sector_t size;
1988 struct mmc_blk_data *md;
1989
1990 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1991 /*
 1992		 * The EXT_CSD sector count is in number of 512 byte
1993 * sectors.
1994 */
1995 size = card->ext_csd.sectors;
1996 } else {
1997 /*
1998 * The CSD capacity field is in units of read_blkbits.
1999 * set_capacity takes units of 512 bytes.
2000 */
2001 size = card->csd.capacity << (card->csd.read_blkbits - 9);
2002 }
2003
2004 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
2005 return md;
2006}
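
/*
 * Worked example for the CSD branch, with hypothetical values: a card
 * reporting csd.capacity == 976128 and csd.read_blkbits == 10 exports
 * 976128 << (10 - 9) == 1952256 sectors of 512 bytes, roughly 0.93 GiB.
 */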
2007
2008static int mmc_blk_alloc_part(struct mmc_card *card,
2009 struct mmc_blk_data *md,
2010 unsigned int part_type,
2011 sector_t size,
2012 bool default_ro,
2013 const char *subname)
2014{
2015 char cap_str[10];
2016 struct mmc_blk_data *part_md;
2017
2018 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2019 subname);
2020 if (IS_ERR(part_md))
2021 return PTR_ERR(part_md);
2022 part_md->part_type = part_type;
2023 list_add(&part_md->part, &md->part);
2024
2025 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2026 cap_str, sizeof(cap_str));
2027 printk(KERN_INFO "%s: %s %s partition %u %s\n",
2028 part_md->disk->disk_name, mmc_card_id(card),
2029 mmc_card_name(card), part_md->part_type, cap_str);
2030 return 0;
2031}
2032
2033static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2034{
2035 int ret = 0;
2036
2037 if (!mmc_card_mmc(card))
2038 return 0;
2039
Adrian Hunter48ec4d12012-03-24 23:09:18 +05302040 if (card->ext_csd.boot_size && mmc_boot_partition_access(card->host)) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05002041 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
2042 card->ext_csd.boot_size >> 9,
2043 true,
2044 "boot0");
2045 if (ret)
2046 return ret;
2047 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
2048 card->ext_csd.boot_size >> 9,
2049 true,
2050 "boot1");
2051 if (ret)
2052 return ret;
2053 }
2054
2055 return ret;
2056}
2057
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058static int
2059mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
2060{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 int err;
2062
Subhash Jadavanic63f0e52012-03-21 17:39:56 +05302063 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
2064 return 0;
2065
Pierre Ossmanb8558852007-01-03 19:47:29 +01002066 mmc_claim_host(card->host);
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03002067 err = mmc_set_blocklen(card, 512);
Pierre Ossmanb8558852007-01-03 19:47:29 +01002068 mmc_release_host(card->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
2070 if (err) {
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03002071 printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
2072 md->disk->disk_name, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 return -EINVAL;
2074 }
2075
2076 return 0;
2077}
2078
Andrei Warkentin371a6892011-04-11 18:10:25 -05002079static void mmc_blk_remove_req(struct mmc_blk_data *md)
2080{
2081 if (md) {
Maya Erez63c61d62012-05-31 21:00:18 +03002082 device_remove_file(disk_to_dev(md->disk),
2083 &md->num_wr_reqs_to_start_packing);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002084 if (md->disk->flags & GENHD_FL_UP) {
2085 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2086
2087 /* Stop new requests from getting into the queue */
2088 del_gendisk(md->disk);
2089 }
2090
2091 /* Then flush out any already in there */
2092 mmc_cleanup_queue(&md->queue);
2093 mmc_blk_put(md);
2094 }
2095}
2096
2097static void mmc_blk_remove_parts(struct mmc_card *card,
2098 struct mmc_blk_data *md)
2099{
2100 struct list_head *pos, *q;
2101 struct mmc_blk_data *part_md;
2102
Andrei Warkentinf06c9152011-04-21 22:46:13 -05002103 __clear_bit(md->name_idx, name_use);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002104 list_for_each_safe(pos, q, &md->part) {
2105 part_md = list_entry(pos, struct mmc_blk_data, part);
2106 list_del(pos);
2107 mmc_blk_remove_req(part_md);
2108 }
2109}
2110
2111static int mmc_add_disk(struct mmc_blk_data *md)
2112{
2113 int ret;
2114
2115 add_disk(md->disk);
2116 md->force_ro.show = force_ro_show;
2117 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05302118 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002119 md->force_ro.attr.name = "force_ro";
2120 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2121 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
Maya Erez63c61d62012-05-31 21:00:18 +03002122 if (ret) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05002123 del_gendisk(md->disk);
Maya Erez63c61d62012-05-31 21:00:18 +03002124 goto out;
2125 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002126
Maya Erez63c61d62012-05-31 21:00:18 +03002127 md->num_wr_reqs_to_start_packing.show =
2128 num_wr_reqs_to_start_packing_show;
2129 md->num_wr_reqs_to_start_packing.store =
2130 num_wr_reqs_to_start_packing_store;
2131 sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
2132 md->num_wr_reqs_to_start_packing.attr.name =
2133 "num_wr_reqs_to_start_packing";
2134 md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
2135 ret = device_create_file(disk_to_dev(md->disk),
2136 &md->num_wr_reqs_to_start_packing);
2137 if (ret) {
2138 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2139 del_gendisk(md->disk);
2140 }
2141
2142out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05002143 return ret;
2144}
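
/*
 * The attributes registered above appear in sysfs, typically as
 * /sys/block/mmcblkX/force_ro and
 * /sys/block/mmcblkX/num_wr_reqs_to_start_packing (paths illustrative),
 * so the write-packing threshold can be tuned from user space, e.g.:
 *
 *	echo 17 > /sys/block/mmcblk0/num_wr_reqs_to_start_packing
 */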
2145
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002146static const struct mmc_fixup blk_fixups[] =
2147{
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002148 MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
2149 MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
2150 MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
2151 MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
2152 MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002153
2154 /*
2155 * Some MMC cards experience performance degradation with CMD23
2156 * instead of CMD12-bounded multiblock transfers. For now we'll
 2157	 * blacklist what's bad...
2158 * - Certain Toshiba cards.
2159 *
2160 * N.B. This doesn't affect SD cards.
2161 */
2162 MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
2163 MMC_QUIRK_BLK_NO_CMD23),
2164 MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
2165 MMC_QUIRK_BLK_NO_CMD23),
2166 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
2167 MMC_QUIRK_BLK_NO_CMD23),
Pratibhasagar V3a18bbd2012-04-17 14:41:19 +05302168
2169 /* Some INAND MCP devices advertise incorrect timeout values */
2170 MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
2171 MMC_QUIRK_INAND_DATA_TIMEOUT),
2172
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002173 END_FIXUP
2174};
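
/*
 * Each MMC_FIXUP() entry matches on the CID product name together with
 * the manufacturer and OEM IDs (CID_OEMID_ANY wildcards the OEM field)
 * and runs the given hook, here add_quirk()/add_quirk_mmc() with the
 * quirk bit to set.
 */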
2175
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176static int mmc_blk_probe(struct mmc_card *card)
2177{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002178 struct mmc_blk_data *md, *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 int err;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002180 char cap_str[10];
2181
Pierre Ossman912490d2005-05-21 10:27:02 +01002182 /*
2183 * Check that the card supports the command class(es) we need.
2184 */
2185 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 return -ENODEV;
2187
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 md = mmc_blk_alloc(card);
2189 if (IS_ERR(md))
2190 return PTR_ERR(md);
2191
2192 err = mmc_blk_set_blksize(md, card);
2193 if (err)
2194 goto out;
2195
Yi Li444122f2009-02-05 15:31:57 +08002196 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002197 cap_str, sizeof(cap_str));
2198 printk(KERN_INFO "%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002200 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
Andrei Warkentin371a6892011-04-11 18:10:25 -05002202 if (mmc_blk_alloc_parts(card, md))
2203 goto out;
2204
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 mmc_set_drvdata(card, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002206 mmc_fixup_device(card, blk_fixups);
2207
San Mehatc87f8d42009-07-30 08:21:19 -07002208#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2209 mmc_set_bus_resume_policy(card->host, 1);
2210#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05002211 if (mmc_add_disk(md))
2212 goto out;
2213
2214 list_for_each_entry(part_md, &md->part, part) {
2215 if (mmc_add_disk(part_md))
2216 goto out;
2217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 return 0;
2219
2220 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05002221 mmc_blk_remove_parts(card, md);
2222 mmc_blk_remove_req(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 return err;
2224}
2225
2226static void mmc_blk_remove(struct mmc_card *card)
2227{
2228 struct mmc_blk_data *md = mmc_get_drvdata(card);
2229
Andrei Warkentin371a6892011-04-11 18:10:25 -05002230 mmc_blk_remove_parts(card, md);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03002231 mmc_claim_host(card->host);
2232 mmc_blk_part_switch(card, md);
2233 mmc_release_host(card->host);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002234 mmc_blk_remove_req(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 mmc_set_drvdata(card, NULL);
San Mehatc87f8d42009-07-30 08:21:19 -07002236#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2237 mmc_set_bus_resume_policy(card->host, 0);
2238#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239}
2240
2241#ifdef CONFIG_PM
Chuanxiao Dong72407e92011-08-24 14:00:41 +05302242static int mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002244 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 struct mmc_blk_data *md = mmc_get_drvdata(card);
2246
2247 if (md) {
2248 mmc_queue_suspend(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002249 list_for_each_entry(part_md, &md->part, part) {
2250 mmc_queue_suspend(&part_md->queue);
2251 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 }
2253 return 0;
2254}
2255
2256static int mmc_blk_resume(struct mmc_card *card)
2257{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002258 struct mmc_blk_data *part_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 struct mmc_blk_data *md = mmc_get_drvdata(card);
2260
2261 if (md) {
San Mehatc87f8d42009-07-30 08:21:19 -07002262#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 mmc_blk_set_blksize(md, card);
San Mehatc87f8d42009-07-30 08:21:19 -07002264#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05002265
2266 /*
2267 * Resume involves the card going into idle state,
2268 * so current partition is always the main one.
 2269		 * so the current partition is always the main one.
2270 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002272 list_for_each_entry(part_md, &md->part, part) {
2273 mmc_queue_resume(&part_md->queue);
2274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 }
2276 return 0;
2277}
2278#else
2279#define mmc_blk_suspend NULL
2280#define mmc_blk_resume NULL
2281#endif
2282
2283static struct mmc_driver mmc_driver = {
2284 .drv = {
2285 .name = "mmcblk",
2286 },
2287 .probe = mmc_blk_probe,
2288 .remove = mmc_blk_remove,
2289 .suspend = mmc_blk_suspend,
2290 .resume = mmc_blk_resume,
2291};
2292
2293static int __init mmc_blk_init(void)
2294{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002295 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002297 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2298 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2299
2300 max_devices = 256 / perdev_minors;
2301
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002302 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2303 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002306 res = mmc_register_driver(&mmc_driver);
2307 if (res)
2308 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002310 return 0;
2311 out2:
2312 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 out:
2314 return res;
2315}
2316
2317static void __exit mmc_blk_exit(void)
2318{
2319 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002320 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321}
2322
2323module_init(mmc_blk_init);
2324module_exit(mmc_blk_exit);
2325
2326MODULE_LICENSE("GPL");
2327MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2328