/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
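
/*
 * The SWITCH (CMD6) argument encodes the EXT_CSD byte index in bits
 * [23:16] (access mode in bits [25:24], value in bits [15:8]).  The
 * macro below recovers that index; e.g. an arg of 0x03A50100 yields
 * index 0xA5.  It is used later to spot a SWITCH aimed at
 * EXT_CSD_SANITIZE_START, which needs special handling.
 */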
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

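/*
 * The power_ro_lock attribute reports and controls write protection of
 * the boot areas: reading yields 0 (unlocked), 1 (locked until the
 * next power cycle) or 2 (permanently locked), matching the
 * EXT_CSD_BOOT_WP_B_PWR_WP_EN / _PERM_WP_EN bits tested below.
 * Writing "1" arms the power-on lock; anything else is ignored, and
 * there is deliberately no software path to clear the lock again.
 */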
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -0700292{
Al Viroa5a15612008-03-02 10:33:30 -0500293 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 int ret = -ENXIO;
295
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200296 mutex_lock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297 if (md) {
298 if (md->usage == 2)
Al Viroa5a15612008-03-02 10:33:30 -0500299 check_disk_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 ret = 0;
Pierre Ossmana00fc092005-09-06 15:18:52 -0700301
Al Viroa5a15612008-03-02 10:33:30 -0500302 if ((mode & FMODE_WRITE) && md->read_only) {
Andrew Morton70bb0892008-09-05 14:00:24 -0700303 mmc_blk_put(md);
Pierre Ossmana00fc092005-09-06 15:18:52 -0700304 ret = -EROFS;
Andrew Morton70bb0892008-09-05 14:00:24 -0700305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306 }
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200307 mutex_unlock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308
309 return ret;
310}
311
Al Virodb2a1442013-05-05 21:52:57 -0400312static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313{
Al Viroa5a15612008-03-02 10:33:30 -0500314 struct mmc_blk_data *md = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200316 mutex_lock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317 mmc_blk_put(md);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200318 mutex_unlock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319}
320
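/*
 * MMC/SD media has no real CHS geometry, so fake one for legacy tools
 * that still ask: 4 heads x 16 sectors, with the cylinder count derived
 * from capacity.  E.g. a 4 GiB card (8388608 512-byte sectors) reports
 * 8388608 / (4 * 16) = 131072 cylinders.
 */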
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

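/*
 * Sanitize is requested by writing 1 to the EXT_CSD_SANITIZE_START byte
 * via SWITCH; the card then physically purges unmapped regions, which
 * can take a long time - hence the generous MMC_SANITIZE_REQ_TIMEOUT
 * (240 seconds) defined above.
 */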
static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_SANITIZE_START, 1,
					MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
					     __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

	return err;
}

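/*
 * A minimal userspace sketch of driving this path (illustrative only:
 * the device node, RCA value and response-flag constants are
 * assumptions the caller must supply itself):
 *
 *	struct mmc_ioc_cmd ic = {
 *		.opcode = 13,			// CMD13, SEND_STATUS
 *		.arg    = 1 << 16,		// RCA assumed to be 1
 *		.flags  = MMC_RSP_R1 | MMC_CMD_AC,
 *	};
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		printf("card status %#x\n", ic.response[0]);
 *
 * The caller needs CAP_SYS_RAWIO and must open the whole device, as
 * enforced below.
 */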
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

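/*
 * MMC_IOC_MULTI_CMD hands the driver an array of commands that are run
 * back-to-back under a single claim of the card.  A hedged userspace
 * sketch (only the buffer sizing is shown; command contents are up to
 * the caller):
 *
 *	struct mmc_ioc_multi_cmd *mc =
 *		malloc(sizeof(*mc) + 2 * sizeof(struct mmc_ioc_cmd));
 *	mc->num_of_cmds = 2;
 *	// fill mc->cmds[0] and mc->cmds[1] as for MMC_IOC_CMD ...
 *	ioctl(fd, MMC_IOC_MULTI_CMD, mc);
 *	free(mc);
 *
 * This matters for e.g. RPMB, where an authenticated operation is a
 * fixed sequence of commands that must not be interleaved with other
 * I/O to the card.
 */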
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

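/*
 * An eMMC device exposes its boot, RPMB and general-purpose areas as
 * separate block devices, but the card has a single access window: the
 * ACC bits of the EXT_CSD PARTITION_CONFIG byte select which area the
 * read/write commands operate on.  mmc_blk_part_switch() rewrites those
 * bits via SWITCH whenever a request targets a different area than the
 * currently selected one.
 */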
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}

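/*
 * SD cards report how much of the last write actually made it to the
 * media via ACMD22 (SEND_NUM_WR_BLOCKS): CMD55 flags the next command
 * as application-specific, and the card then returns a single 4-byte
 * big-endian block count as a data transfer.  (u32)-1 serves as the
 * "could not tell" value throughout.
 */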
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, int *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = 1;
		}

		/* We may rely on the host hw to handle busy detection.*/
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
			hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, int *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = 1;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was an r/w cmd crc error, or an illegal command
		 * (e.g., issued in the wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

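/*
 * For a block-layer discard, the least destructive card operation that
 * is supported gets picked: DISCARD if available, else TRIM, else a
 * plain ERASE (which works on whole erase groups).  Some iNAND parts
 * additionally want the CMD38 argument staged through EXT_CSD byte 113
 * first, which the MMC_QUIRK_INAND_CMD38 branch below handles.
 */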
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for that to complete.
	 */
1374 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001375 int err;
Trey Ramsay8fee4762012-11-16 09:31:41 -06001376
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001377 /* Check stop command response */
1378 if (brq->stop.resp[0] & R1_ERROR) {
1379 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1380 req->rq_disk->disk_name, __func__,
1381 brq->stop.resp[0]);
1382 gen_err = 1;
1383 }
1384
Ulf Hansson95a91292014-01-29 13:11:27 +01001385 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1386 &gen_err);
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001387 if (err)
1388 return MMC_BLK_CMD_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02001389 }
1390
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001391 /* if general error occurs, retry the write operation. */
1392 if (gen_err) {
1393 pr_warn("%s: retrying write for general error\n",
1394 req->rq_disk->disk_name);
1395 return MMC_BLK_RETRY;
1396 }
1397
Per Forlind78d4a82011-07-01 18:55:30 +02001398 if (brq->data.error) {
Adrian Hunterb8360a42015-05-07 13:10:24 +03001399 if (need_retune && !brq->retune_retry_done) {
Russell King09faf612016-01-29 09:44:00 +00001400 pr_debug("%s: retrying because a re-tune was needed\n",
1401 req->rq_disk->disk_name);
Adrian Hunterb8360a42015-05-07 13:10:24 +03001402 brq->retune_retry_done = 1;
1403 return MMC_BLK_RETRY;
1404 }
Per Forlind78d4a82011-07-01 18:55:30 +02001405 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1406 req->rq_disk->disk_name, brq->data.error,
1407 (unsigned)blk_rq_pos(req),
1408 (unsigned)blk_rq_sectors(req),
1409 brq->cmd.resp[0], brq->stop.resp[0]);
1410
1411 if (rq_data_dir(req) == READ) {
Adrian Hunter67716322011-08-29 16:42:15 +03001412 if (ecc_err)
1413 return MMC_BLK_ECC_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02001414 return MMC_BLK_DATA_ERR;
1415 } else {
1416 return MMC_BLK_CMD_ERR;
1417 }
1418 }
1419
Adrian Hunter67716322011-08-29 16:42:15 +03001420 if (!brq->data.bytes_xfered)
1421 return MMC_BLK_RETRY;
Per Forlind78d4a82011-07-01 18:55:30 +02001422
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001423 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1424 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1425 return MMC_BLK_PARTIAL;
1426 else
1427 return MMC_BLK_SUCCESS;
1428 }
1429
Adrian Hunter67716322011-08-29 16:42:15 +03001430 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1431 return MMC_BLK_PARTIAL;
1432
1433 return MMC_BLK_SUCCESS;
Per Forlind78d4a82011-07-01 18:55:30 +02001434}
1435
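/*
 * Error check for packed commands: run the normal per-request error
 * check, then query the card status; on an exception event read the
 * EXT_CSD to learn whether the packed command failed and, when the
 * failure is indexed, at which entry, so that only the failed part
 * of the packed group is retried.
 */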
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001436static int mmc_blk_packed_err_check(struct mmc_card *card,
1437 struct mmc_async_req *areq)
1438{
1439 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1440 mmc_active);
1441 struct request *req = mq_rq->req;
1442 struct mmc_packed *packed = mq_rq->packed;
1443 int err, check, status;
1444 u8 *ext_csd;
1445
1446 BUG_ON(!packed);
1447
1448 packed->retries--;
1449 check = mmc_blk_err_check(card, areq);
1450 err = get_card_status(card, &status, 0);
1451 if (err) {
1452 pr_err("%s: error %d sending status command\n",
1453 req->rq_disk->disk_name, err);
1454 return MMC_BLK_ABORT;
1455 }
1456
1457 if (status & R1_EXCEPTION_EVENT) {
Ulf Hansson86817ff2014-10-17 11:39:05 +02001458 err = mmc_get_ext_csd(card, &ext_csd);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001459 if (err) {
1460 pr_err("%s: error %d sending ext_csd\n",
1461 req->rq_disk->disk_name, err);
Ulf Hansson86817ff2014-10-17 11:39:05 +02001462 return MMC_BLK_ABORT;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001463 }
1464
1465 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1466 EXT_CSD_PACKED_FAILURE) &&
1467 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1468 EXT_CSD_PACKED_GENERIC_ERROR)) {
1469 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1470 EXT_CSD_PACKED_INDEXED_ERROR) {
1471 packed->idx_failure =
1472 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1473 check = MMC_BLK_PARTIAL;
1474 }
1475 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1476 "failure index: %d\n",
1477 req->rq_disk->disk_name, packed->nr_entries,
1478 packed->blocks, packed->idx_failure);
1479 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001480 kfree(ext_csd);
1481 }
1482
1483 return check;
1484}
1485
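/*
 * Prepare a read/write request: fill in the command, data, stop and
 * (when supported) CMD23 parts of the mmc_blk_request from the block
 * layer request. disable_multi forces single-block transfers, which
 * is used when retrying after an error to isolate failing sectors.
 */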
Per Forlin54d49d72011-07-01 18:55:29 +02001486static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1487 struct mmc_card *card,
1488 int disable_multi,
1489 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490{
Per Forlin54d49d72011-07-01 18:55:29 +02001491 u32 readcmd, writecmd;
1492 struct mmc_blk_request *brq = &mqrq->brq;
1493 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 struct mmc_blk_data *md = mq->data;
Saugata Das42659002011-12-21 13:09:17 +05301495 bool do_data_tag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001497 /*
1498 * Reliable writes are used to implement Forced Unit Access and
Luca Porziod3df0462015-11-06 15:12:26 +00001499 * are supported only on MMCs.
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001500 */
Luca Porziod3df0462015-11-06 15:12:26 +00001501 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001502 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001503 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001504
Per Forlin54d49d72011-07-01 18:55:29 +02001505 memset(brq, 0, sizeof(struct mmc_blk_request));
1506 brq->mrq.cmd = &brq->cmd;
1507 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Per Forlin54d49d72011-07-01 18:55:29 +02001509 brq->cmd.arg = blk_rq_pos(req);
1510 if (!mmc_card_blockaddr(card))
1511 brq->cmd.arg <<= 9;
1512 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1513 brq->data.blksz = 512;
1514 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1515 brq->stop.arg = 0;
Per Forlin54d49d72011-07-01 18:55:29 +02001516 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Per Forlin54d49d72011-07-01 18:55:29 +02001518 /*
1519 * The block layer doesn't support all sector count
 1520	 * restrictions, so we need to be prepared for
 1521	 * overly large requests.
1522 */
1523 if (brq->data.blocks > card->host->max_blk_count)
1524 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
Paul Walmsley2bf22b32011-10-06 14:50:33 -06001526 if (brq->data.blocks > 1) {
1527 /*
1528 * After a read error, we redo the request one sector
1529 * at a time in order to accurately determine which
1530 * sectors can be read successfully.
1531 */
1532 if (disable_multi)
1533 brq->data.blocks = 1;
1534
Kuninori Morimoto2e47e842014-09-02 19:08:53 -07001535 /*
 1536	 * Some controllers have HW issues when operating
 1537	 * in multi-block I/O mode.
1538 */
1539 if (card->host->ops->multi_io_quirk)
1540 brq->data.blocks = card->host->ops->multi_io_quirk(card,
1541 (rq_data_dir(req) == READ) ?
1542 MMC_DATA_READ : MMC_DATA_WRITE,
1543 brq->data.blocks);
Paul Walmsley2bf22b32011-10-06 14:50:33 -06001544 }
Per Forlin54d49d72011-07-01 18:55:29 +02001545
1546 if (brq->data.blocks > 1 || do_rel_wr) {
1547 /* SPI multiblock writes terminate using a special
1548 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +02001549 */
Per Forlin54d49d72011-07-01 18:55:29 +02001550 if (!mmc_host_is_spi(card->host) ||
1551 rq_data_dir(req) == READ)
1552 brq->mrq.stop = &brq->stop;
1553 readcmd = MMC_READ_MULTIPLE_BLOCK;
1554 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1555 } else {
1556 brq->mrq.stop = NULL;
1557 readcmd = MMC_READ_SINGLE_BLOCK;
1558 writecmd = MMC_WRITE_BLOCK;
1559 }
1560 if (rq_data_dir(req) == READ) {
1561 brq->cmd.opcode = readcmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09001562 brq->data.flags = MMC_DATA_READ;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01001563 if (brq->mrq.stop)
1564 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1565 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02001566 } else {
1567 brq->cmd.opcode = writecmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09001568 brq->data.flags = MMC_DATA_WRITE;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01001569 if (brq->mrq.stop)
1570 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1571 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02001572 }
Pierre Ossman548d2de2009-04-10 17:52:57 +02001573
Per Forlin54d49d72011-07-01 18:55:29 +02001574 if (do_rel_wr)
1575 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +01001576
Per Forlin54d49d72011-07-01 18:55:29 +02001577 /*
Saugata Das42659002011-12-21 13:09:17 +05301578	 * The data tag is used only when writing metadata, to speed
 1579	 * up the write and any subsequent reads of this metadata.
1580 */
1581 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1582 (req->cmd_flags & REQ_META) &&
1583 (rq_data_dir(req) == WRITE) &&
1584 ((brq->data.blocks * brq->data.blksz) >=
1585 card->ext_csd.data_tag_unit_size);
1586
1587 /*
Per Forlin54d49d72011-07-01 18:55:29 +02001588 * Pre-defined multi-block transfers are preferable to
 1589	 * open-ended ones (and necessary for reliable writes).
1590 * However, it is not sufficient to just send CMD23,
1591 * and avoid the final CMD12, as on an error condition
1592 * CMD12 (stop) needs to be sent anyway. This, coupled
1593 * with Auto-CMD23 enhancements provided by some
1594 * hosts, means that the complexity of dealing
1595 * with this is best left to the host. If CMD23 is
1596 * supported by card and host, we'll fill sbc in and let
1597 * the host deal with handling it correctly. This means
1598 * that for hosts that don't expose MMC_CAP_CMD23, no
1599 * change of behavior will be observed.
1600 *
 1601	 * N.B.: Some MMC cards experience performance degradation.
1602 * We'll avoid using CMD23-bounded multiblock writes for
1603 * these, while retaining features like reliable writes.
1604 */
Saugata Das42659002011-12-21 13:09:17 +05301605 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1606 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1607 do_data_tag)) {
Per Forlin54d49d72011-07-01 18:55:29 +02001608 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
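		/*
		 * CMD23 argument: block count in bits 0-15, with bit 31
		 * requesting a reliable write and bit 29 tagging the data.
		 */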
1609 brq->sbc.arg = brq->data.blocks |
Saugata Das42659002011-12-21 13:09:17 +05301610 (do_rel_wr ? (1 << 31) : 0) |
1611 (do_data_tag ? (1 << 29) : 0);
Per Forlin54d49d72011-07-01 18:55:29 +02001612 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1613 brq->mrq.sbc = &brq->sbc;
1614 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001615
Per Forlin54d49d72011-07-01 18:55:29 +02001616 mmc_set_data_timeout(&brq->data, card);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001617
Per Forlin54d49d72011-07-01 18:55:29 +02001618 brq->data.sg = mqrq->sg;
1619 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001620
Per Forlin54d49d72011-07-01 18:55:29 +02001621 /*
1622 * Adjust the sg list so it is the same size as the
1623 * request.
1624 */
1625 if (brq->data.blocks != blk_rq_sectors(req)) {
1626 int i, data_size = brq->data.blocks << 9;
1627 struct scatterlist *sg;
Pierre Ossmanb146d262007-07-24 19:16:54 +02001628
Per Forlin54d49d72011-07-01 18:55:29 +02001629 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1630 data_size -= sg->length;
1631 if (data_size <= 0) {
1632 sg->length += data_size;
1633 i++;
1634 break;
Adrian Hunter6a79e392008-12-31 18:21:17 +01001635 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01001636 }
Per Forlin54d49d72011-07-01 18:55:29 +02001637 brq->data.sg_len = i;
1638 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01001639
Per Forlinee8a43a2011-07-01 18:55:33 +02001640 mqrq->mmc_active.mrq = &brq->mrq;
1641 mqrq->mmc_active.err_check = mmc_blk_err_check;
1642
Per Forlin54d49d72011-07-01 18:55:29 +02001643 mmc_queue_bounce_pre(mqrq);
1644}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
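/*
 * Work out how many scatterlist segments the packed command header
 * (512 bytes, or 4KB on large-sector cards) needs, given the queue's
 * maximum segment size.
 */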
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001646static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1647 struct mmc_card *card)
1648{
1649 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1650 unsigned int max_seg_sz = queue_max_segment_size(q);
1651 unsigned int len, nr_segs = 0;
1652
1653 do {
1654 len = min(hdr_sz, max_seg_sz);
1655 hdr_sz -= len;
1656 nr_segs++;
1657 } while (hdr_sz);
1658
1659 return nr_segs;
1660}
1661
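/*
 * Try to gather consecutive write requests from the queue into a
 * single packed command, respecting the card's and host's limits on
 * packed entries, sectors and segments. Returns the number of packed
 * requests, or 0 when the request must be issued on its own.
 */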
1662static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1663{
1664 struct request_queue *q = mq->queue;
1665 struct mmc_card *card = mq->card;
1666 struct request *cur = req, *next = NULL;
1667 struct mmc_blk_data *md = mq->data;
1668 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1669 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1670 unsigned int req_sectors = 0, phys_segments = 0;
1671 unsigned int max_blk_count, max_phys_segs;
1672 bool put_back = true;
1673 u8 max_packed_rw = 0;
1674 u8 reqs = 0;
1675
1676 if (!(md->flags & MMC_BLK_PACKED_CMD))
1677 goto no_packed;
1678
1679 if ((rq_data_dir(cur) == WRITE) &&
1680 mmc_host_packed_wr(card->host))
1681 max_packed_rw = card->ext_csd.max_packed_writes;
1682
1683 if (max_packed_rw == 0)
1684 goto no_packed;
1685
1686 if (mmc_req_rel_wr(cur) &&
1687 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1688 goto no_packed;
1689
1690 if (mmc_large_sector(card) &&
1691 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1692 goto no_packed;
1693
1694 mmc_blk_clear_packed(mqrq);
1695
1696 max_blk_count = min(card->host->max_blk_count,
1697 card->host->max_req_size >> 9);
1698 if (unlikely(max_blk_count > 0xffff))
1699 max_blk_count = 0xffff;
1700
1701 max_phys_segs = queue_max_segments(q);
1702 req_sectors += blk_rq_sectors(cur);
1703 phys_segments += cur->nr_phys_segments;
1704
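	/*
	 * For writes, account for the packed command header, which takes
	 * one sector (eight on large-sector cards) plus its own segments.
	 */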
1705 if (rq_data_dir(cur) == WRITE) {
1706 req_sectors += mmc_large_sector(card) ? 8 : 1;
1707 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1708 }
1709
1710 do {
1711 if (reqs >= max_packed_rw - 1) {
1712 put_back = false;
1713 break;
1714 }
1715
1716 spin_lock_irq(q->queue_lock);
1717 next = blk_fetch_request(q);
1718 spin_unlock_irq(q->queue_lock);
1719 if (!next) {
1720 put_back = false;
1721 break;
1722 }
1723
1724 if (mmc_large_sector(card) &&
1725 !IS_ALIGNED(blk_rq_sectors(next), 8))
1726 break;
1727
Mike Christie3a5e02c2016-06-05 14:32:23 -05001728 if (req_op(next) == REQ_OP_DISCARD ||
Adrian Hunter7afafc82016-08-16 10:59:35 +03001729 req_op(next) == REQ_OP_SECURE_ERASE ||
Mike Christie3a5e02c2016-06-05 14:32:23 -05001730 req_op(next) == REQ_OP_FLUSH)
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001731 break;
1732
1733 if (rq_data_dir(cur) != rq_data_dir(next))
1734 break;
1735
1736 if (mmc_req_rel_wr(next) &&
1737 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1738 break;
1739
1740 req_sectors += blk_rq_sectors(next);
1741 if (req_sectors > max_blk_count)
1742 break;
1743
1744 phys_segments += next->nr_phys_segments;
1745 if (phys_segments > max_phys_segs)
1746 break;
1747
1748 list_add_tail(&next->queuelist, &mqrq->packed->list);
1749 cur = next;
1750 reqs++;
1751 } while (1);
1752
1753 if (put_back) {
1754 spin_lock_irq(q->queue_lock);
1755 blk_requeue_request(q, next);
1756 spin_unlock_irq(q->queue_lock);
1757 }
1758
1759 if (reqs > 0) {
1760 list_add(&req->queuelist, &mqrq->packed->list);
1761 mqrq->packed->nr_entries = ++reqs;
1762 mqrq->packed->retries = reqs;
1763 return reqs;
1764 }
1765
1766no_packed:
1767 mqrq->cmd_type = MMC_PACKED_NONE;
1768 return 0;
1769}
1770
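/*
 * Prepare a packed write: build the packed command header (one CMD23
 * argument and one CMD25 start address per entry), then set up a
 * single CMD23 + CMD25 transfer covering the header plus all of the
 * packed data blocks.
 */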
1771static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1772 struct mmc_card *card,
1773 struct mmc_queue *mq)
1774{
1775 struct mmc_blk_request *brq = &mqrq->brq;
1776 struct request *req = mqrq->req;
1777 struct request *prq;
1778 struct mmc_blk_data *md = mq->data;
1779 struct mmc_packed *packed = mqrq->packed;
1780 bool do_rel_wr, do_data_tag;
1781 u32 *packed_cmd_hdr;
1782 u8 hdr_blocks;
1783 u8 i = 1;
1784
1785 BUG_ON(!packed);
1786
1787 mqrq->cmd_type = MMC_PACKED_WRITE;
1788 packed->blocks = 0;
1789 packed->idx_failure = MMC_PACKED_NR_IDX;
1790
1791 packed_cmd_hdr = packed->cmd_hdr;
1792 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
Taras Kondratiukf68381a2016-07-13 22:05:38 +00001793 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1794 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001795 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
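	/*
	 * Header layout (little-endian 32-bit words):
	 *   word 0:         (nr_entries << 16) | (PACKED_CMD_WR << 8) | version
	 *   words 2i, 2i+1: CMD23 argument and CMD18/CMD25 start address
	 *                   for entry i (i >= 1)
	 */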
1796
1797 /*
1798 * Argument for each entry of packed group
1799 */
1800 list_for_each_entry(prq, &packed->list, queuelist) {
1801 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1802 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1803 (prq->cmd_flags & REQ_META) &&
1804 (rq_data_dir(prq) == WRITE) &&
Adrian Hunterd806b462016-06-10 16:22:16 +03001805 blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001806 /* Argument of CMD23 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00001807 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001808 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1809 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
Taras Kondratiukf68381a2016-07-13 22:05:38 +00001810 blk_rq_sectors(prq));
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001811 /* Argument of CMD18 or CMD25 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00001812 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001813 mmc_card_blockaddr(card) ?
Taras Kondratiukf68381a2016-07-13 22:05:38 +00001814 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001815 packed->blocks += blk_rq_sectors(prq);
1816 i++;
1817 }
1818
1819 memset(brq, 0, sizeof(struct mmc_blk_request));
1820 brq->mrq.cmd = &brq->cmd;
1821 brq->mrq.data = &brq->data;
1822 brq->mrq.sbc = &brq->sbc;
1823 brq->mrq.stop = &brq->stop;
1824
1825 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1826 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1827 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1828
1829 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1830 brq->cmd.arg = blk_rq_pos(req);
1831 if (!mmc_card_blockaddr(card))
1832 brq->cmd.arg <<= 9;
1833 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1834
1835 brq->data.blksz = 512;
1836 brq->data.blocks = packed->blocks + hdr_blocks;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09001837 brq->data.flags = MMC_DATA_WRITE;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001838
1839 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1840 brq->stop.arg = 0;
1841 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1842
1843 mmc_set_data_timeout(&brq->data, card);
1844
1845 brq->data.sg = mqrq->sg;
1846 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1847
1848 mqrq->mmc_active.mrq = &brq->mrq;
1849 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1850
1851 mmc_queue_bounce_pre(mqrq);
1852}
1853
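/*
 * After a command error on a write, complete the sectors that are
 * known to have been written (queried from SD cards, taken from
 * bytes_xfered otherwise) and return non-zero if part of the request
 * remains to be handled.
 */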
Adrian Hunter67716322011-08-29 16:42:15 +03001854static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1855 struct mmc_blk_request *brq, struct request *req,
1856 int ret)
1857{
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001858 struct mmc_queue_req *mq_rq;
1859 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1860
Adrian Hunter67716322011-08-29 16:42:15 +03001861 /*
1862 * If this is an SD card and we're writing, we can first
1863 * mark the known good sectors as ok.
1864 *
1865 * If the card is not SD, we can still ok written sectors
1866 * as reported by the controller (which might be less than
1867 * the real number of written sectors, but never more).
1868 */
1869 if (mmc_card_sd(card)) {
1870 u32 blocks;
1871
1872 blocks = mmc_sd_num_wr_blocks(card);
1873 if (blocks != (u32)-1) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301874 ret = blk_end_request(req, 0, blocks << 9);
Adrian Hunter67716322011-08-29 16:42:15 +03001875 }
1876 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001877 if (!mmc_packed_cmd(mq_rq->cmd_type))
1878 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03001879 }
1880 return ret;
1881}
1882
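/*
 * Complete the successfully transferred requests of a packed group.
 * When a failure index is known, requeue from that entry and return
 * non-zero so that the remainder of the group is retried.
 */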
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001883static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1884{
1885 struct request *prq;
1886 struct mmc_packed *packed = mq_rq->packed;
1887 int idx = packed->idx_failure, i = 0;
1888 int ret = 0;
1889
1890 BUG_ON(!packed);
1891
1892 while (!list_empty(&packed->list)) {
1893 prq = list_entry_rq(packed->list.next);
1894 if (idx == i) {
1895 /* retry from error index */
1896 packed->nr_entries -= idx;
1897 mq_rq->req = prq;
1898 ret = 1;
1899
1900 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1901 list_del_init(&prq->queuelist);
1902 mmc_blk_clear_packed(mq_rq);
1903 }
1904 return ret;
1905 }
1906 list_del_init(&prq->queuelist);
1907 blk_end_request(prq, 0, blk_rq_bytes(prq));
1908 i++;
1909 }
1910
1911 mmc_blk_clear_packed(mq_rq);
1912 return ret;
1913}
1914
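/* Fail every request of a packed group with -EIO. */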
1915static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1916{
1917 struct request *prq;
1918 struct mmc_packed *packed = mq_rq->packed;
1919
1920 BUG_ON(!packed);
1921
1922 while (!list_empty(&packed->list)) {
1923 prq = list_entry_rq(packed->list.next);
1924 list_del_init(&prq->queuelist);
1925 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1926 }
1927
1928 mmc_blk_clear_packed(mq_rq);
1929}
1930
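/*
 * Put the requests of a packed group back on the queue, except for
 * the originating request, which stays with the caller.
 */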
1931static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1932 struct mmc_queue_req *mq_rq)
1933{
1934 struct request *prq;
1935 struct request_queue *q = mq->queue;
1936 struct mmc_packed *packed = mq_rq->packed;
1937
1938 BUG_ON(!packed);
1939
1940 while (!list_empty(&packed->list)) {
1941 prq = list_entry_rq(packed->list.prev);
1942 if (prq->queuelist.prev != &packed->list) {
1943 list_del_init(&prq->queuelist);
1944 spin_lock_irq(q->queue_lock);
1945 blk_requeue_request(mq->queue, prq);
1946 spin_unlock_irq(q->queue_lock);
1947 } else {
1948 list_del_init(&prq->queuelist);
1949 }
1950 }
1951
1952 mmc_blk_clear_packed(mq_rq);
1953}
1954
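/*
 * Issue a read/write request using the asynchronous request
 * mechanism: the next request is prepared and started while the
 * previous one completes, and the completion status is handled here,
 * including retries, single-block fallback and card reset.
 */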
Per Forlinee8a43a2011-07-01 18:55:33 +02001955static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02001956{
1957 struct mmc_blk_data *md = mq->data;
1958 struct mmc_card *card = md->queue.card;
1959 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunterb8360a42015-05-07 13:10:24 +03001960 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02001961 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02001962 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05301963 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02001964 struct mmc_async_req *areq;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001965 const u8 packed_nr = 2;
1966 u8 reqs = 0;
Per Forlinee8a43a2011-07-01 18:55:33 +02001967
1968 if (!rqc && !mq->mqrq_prev->req)
1969 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02001970
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001971 if (rqc)
1972 reqs = mmc_blk_prep_packed_list(mq, rqc);
1973
Per Forlin54d49d72011-07-01 18:55:29 +02001974 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02001975 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05301976 /*
 1977	 * When the 4KB native sector size is enabled, only
 1978	 * reads and writes in multiples of 8 blocks are allowed.
1979 */
Yuan, Juntaoe87c8562016-05-13 07:59:24 +00001980 if (mmc_large_sector(card) &&
1981 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05301982 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1983 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001984 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05301985 goto cmd_abort;
1986 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001987
1988 if (reqs >= packed_nr)
1989 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1990 card, mq);
1991 else
1992 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02001993 areq = &mq->mqrq_cur->mmc_active;
1994 } else
1995 areq = NULL;
1996 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05001997 if (!areq) {
1998 if (status == MMC_BLK_NEW_REQUEST)
1999 mq->flags |= MMC_QUEUE_NEW_REQUEST;
Per Forlinee8a43a2011-07-01 18:55:33 +02002000 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002001 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02002002
Per Forlinee8a43a2011-07-01 18:55:33 +02002003 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2004 brq = &mq_rq->brq;
2005 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03002006 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02002007 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02002008
Per Forlind78d4a82011-07-01 18:55:30 +02002009 switch (status) {
2010 case MMC_BLK_SUCCESS:
2011 case MMC_BLK_PARTIAL:
2012 /*
2013 * A block was successfully transferred.
2014 */
Adrian Hunter67716322011-08-29 16:42:15 +03002015 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002016
2017 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2018 ret = mmc_blk_end_packed_req(mq_rq);
2019 break;
2020 } else {
2021 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02002022 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002023 }
2024
Adrian Hunter67716322011-08-29 16:42:15 +03002025 /*
2026 * If the blk_end_request function returns non-zero even
2027 * though all data has been transferred and no errors
2028 * were returned by the host controller, it's a bug.
2029 */
Per Forlinee8a43a2011-07-01 18:55:33 +02002030 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05302031 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02002032 __func__, blk_rq_bytes(req),
2033 brq->data.bytes_xfered);
2034 rqc = NULL;
2035 goto cmd_abort;
2036 }
Per Forlind78d4a82011-07-01 18:55:30 +02002037 break;
2038 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03002039 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
Ding Wang29535f72015-05-18 20:14:15 +08002040 if (mmc_blk_reset(md, card->host, type))
2041 goto cmd_abort;
2042 if (!ret)
2043 goto start_new_req;
2044 break;
Per Forlind78d4a82011-07-01 18:55:30 +02002045 case MMC_BLK_RETRY:
Adrian Hunterb8360a42015-05-07 13:10:24 +03002046 retune_retry_done = brq->retune_retry_done;
Per Forlind78d4a82011-07-01 18:55:30 +02002047 if (retry++ < 5)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01002048 break;
Adrian Hunter67716322011-08-29 16:42:15 +03002049 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02002050 case MMC_BLK_ABORT:
Adrian Hunter67716322011-08-29 16:42:15 +03002051 if (!mmc_blk_reset(md, card->host, type))
2052 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002053 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03002054 case MMC_BLK_DATA_ERR: {
2055 int err;
2056
2057 err = mmc_blk_reset(md, card->host, type);
2058 if (!err)
2059 break;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002060 if (err == -ENODEV ||
2061 mmc_packed_cmd(mq_rq->cmd_type))
Adrian Hunter67716322011-08-29 16:42:15 +03002062 goto cmd_abort;
2063 /* Fall through */
2064 }
2065 case MMC_BLK_ECC_ERR:
2066 if (brq->data.blocks > 1) {
2067 /* Redo read one sector at a time */
Joe Perches66061102014-09-12 14:56:56 -07002068 pr_warn("%s: retrying using single block read\n",
2069 req->rq_disk->disk_name);
Adrian Hunter67716322011-08-29 16:42:15 +03002070 disable_multi = 1;
2071 break;
2072 }
Per Forlind78d4a82011-07-01 18:55:30 +02002073 /*
2074 * After an error, we redo I/O one sector at a
2075 * time, so we only reach here after trying to
2076 * read a single sector.
2077 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302078 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02002079 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02002080 if (!ret)
2081 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02002082 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05302083 case MMC_BLK_NOMEDIUM:
2084 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002085 default:
2086 pr_err("%s: Unhandled return value (%d)",
2087 req->rq_disk->disk_name, status);
2088 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002089 }
2090
Per Forlinee8a43a2011-07-01 18:55:33 +02002091 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002092 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2093 if (!mq_rq->packed->retries)
2094 goto cmd_abort;
2095 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2096 mmc_start_req(card->host,
2097 &mq_rq->mmc_active, NULL);
2098 } else {
2099
2100 /*
 2101	 * In case of an incomplete request,
2102 * prepare it again and resend.
2103 */
2104 mmc_blk_rw_rq_prep(mq_rq, card,
2105 disable_multi, mq);
2106 mmc_start_req(card->host,
2107 &mq_rq->mmc_active, NULL);
2108 }
Adrian Hunterb8360a42015-05-07 13:10:24 +03002109 mq_rq->brq.retune_retry_done = retune_retry_done;
Per Forlinee8a43a2011-07-01 18:55:33 +02002110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 } while (ret);
2112
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 return 1;
2114
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01002115 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002116 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2117 mmc_blk_abort_packed_req(mq_rq);
2118 } else {
2119 if (mmc_card_removed(card))
2120 req->cmd_flags |= REQ_QUIET;
2121 while (ret)
2122 ret = blk_end_request(req, -EIO,
2123 blk_rq_cur_bytes(req));
2124 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
Per Forlinee8a43a2011-07-01 18:55:33 +02002126 start_new_req:
2127 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09002128 if (mmc_card_removed(card)) {
2129 rqc->cmd_flags |= REQ_QUIET;
2130 blk_end_request_all(rqc, -EIO);
2131 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002132 /*
 2133	 * If the current request is packed, it needs to be put back.
2134 */
2135 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2136 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2137
Seungwon Jeon7a819022013-01-22 19:48:07 +09002138 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2139 mmc_start_req(card->host,
2140 &mq->mqrq_cur->mmc_active, NULL);
2141 }
Per Forlinee8a43a2011-07-01 18:55:33 +02002142 }
2143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 return 0;
2145}
2146
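/*
 * Queue issue function: claim the host for the first request, switch
 * to the right partition, and dispatch discard, secure erase, flush
 * and read/write requests to their respective handlers.
 */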
Adrian Hunterbd788c92010-08-11 14:17:47 -07002147static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2148{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002149 int ret;
2150 struct mmc_blk_data *md = mq->data;
2151 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002152 struct mmc_host *host = card->host;
2153 unsigned long flags;
Adrian Hunter869c5542016-08-25 14:11:43 -06002154 bool req_is_special = mmc_req_is_special(req);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002155
Per Forlinee8a43a2011-07-01 18:55:33 +02002156 if (req && !mq->mqrq_prev->req)
2157 /* claim host only for the first request */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002158 mmc_get_card(card);
Per Forlinee8a43a2011-07-01 18:55:33 +02002159
Andrei Warkentin371a6892011-04-11 18:10:25 -05002160 ret = mmc_blk_part_switch(card, md);
2161 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03002162 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302163 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03002164 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002165 ret = 0;
2166 goto out;
2167 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002168
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002169 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
Mike Christiec2df40d2016-06-05 14:32:17 -05002170 if (req && req_op(req) == REQ_OP_DISCARD) {
Per Forlinee8a43a2011-07-01 18:55:33 +02002171 /* complete ongoing async transfer before issuing discard */
2172 if (card->host->areq)
2173 mmc_blk_issue_rw_rq(mq, NULL);
Christoph Hellwig288dab82016-06-09 16:00:36 +02002174 ret = mmc_blk_issue_discard_rq(mq, req);
2175 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
2176 /* complete ongoing async transfer before issuing secure erase*/
2177 if (card->host->areq)
2178 mmc_blk_issue_rw_rq(mq, NULL);
2179 ret = mmc_blk_issue_secdiscard_rq(mq, req);
Mike Christie3a5e02c2016-06-05 14:32:23 -05002180 } else if (req && req_op(req) == REQ_OP_FLUSH) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09002181 /* complete ongoing async transfer before issuing flush */
2182 if (card->host->areq)
2183 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002184 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07002185 } else {
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002186 if (!req && host->areq) {
2187 spin_lock_irqsave(&host->context_info.lock, flags);
2188 host->context_info.is_waiting_last_req = true;
2189 spin_unlock_irqrestore(&host->context_info.lock, flags);
2190 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002191 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07002192 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002193
Andrei Warkentin371a6892011-04-11 18:10:25 -05002194out:
Adrian Hunter869c5542016-08-25 14:11:43 -06002195 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09002196 /*
2197 * Release host when there are no more requests
 2198	 * and after a special request (discard, flush) is done.
 2199	 * For a special request there is no reentry to
2200 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2201 */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002202 mmc_put_card(card);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002203 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07002204}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Russell Kinga6f6c962006-01-03 22:38:44 +00002206static inline int mmc_blk_readonly(struct mmc_card *card)
2207{
2208 return mmc_card_readonly(card) ||
2209 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2210}
2211
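/*
 * Allocate and initialise a gendisk and request queue for one MMC
 * data area (main user area, boot, general purpose or RPMB partition).
 */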
Andrei Warkentin371a6892011-04-11 18:10:25 -05002212static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2213 struct device *parent,
2214 sector_t size,
2215 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002216 const char *subname,
2217 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218{
2219 struct mmc_blk_data *md;
2220 int devidx, ret;
2221
Ulf Hanssonb10fa992016-04-07 14:36:46 +02002222again:
2223 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
2224 return ERR_PTR(-ENOMEM);
2225
2226 spin_lock(&mmc_blk_lock);
2227 ret = ida_get_new(&mmc_blk_ida, &devidx);
2228 spin_unlock(&mmc_blk_lock);
2229
2230 if (ret == -EAGAIN)
2231 goto again;
2232 else if (ret)
2233 return ERR_PTR(ret);
2234
2235 if (devidx >= max_devices) {
2236 ret = -ENOSPC;
2237 goto out;
2238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07002240 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00002241 if (!md) {
2242 ret = -ENOMEM;
2243 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 }
Russell Kinga6f6c962006-01-03 22:38:44 +00002245
Johan Rudholmadd710e2011-12-02 08:51:06 +01002246 md->area_type = area_type;
2247
Andrei Warkentinf06c9152011-04-21 22:46:13 -05002248 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00002249 * Set the read-only status based on the supported commands
2250 * and the write protect switch.
2251 */
2252 md->read_only = mmc_blk_readonly(card);
2253
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002254 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00002255 if (md->disk == NULL) {
2256 ret = -ENOMEM;
2257 goto err_kfree;
2258 }
2259
2260 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002261 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00002262 md->usage = 1;
2263
Adrian Hunterd09408a2011-06-23 13:40:28 +03002264 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
Russell Kinga6f6c962006-01-03 22:38:44 +00002265 if (ret)
2266 goto err_putdisk;
2267
Russell Kinga6f6c962006-01-03 22:38:44 +00002268 md->queue.issue_fn = mmc_blk_issue_rq;
2269 md->queue.data = md;
2270
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002271 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002272 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00002273 md->disk->fops = &mmc_bdops;
2274 md->disk->private_data = md;
2275 md->disk->queue = md->queue.queue;
Dan Williams307d8e62016-06-20 10:40:44 -07002276 md->parent = parent;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002277 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Cross382c55f2015-10-22 10:00:41 -07002278 md->disk->flags = GENHD_FL_EXT_DEVT;
Ulf Hanssonf5b4d712014-09-03 11:02:23 +02002279 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
Loic Pallardy53d8f972012-08-06 17:12:28 +02002280 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00002281
2282 /*
2283 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2284 *
2285 * - be set for removable media with permanent block devices
2286 * - be unset for removable block devices with permanent media
2287 *
2288 * Since MMC block devices clearly fall under the second
2289 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2290 * should use the block device creation/destruction hotplug
2291 * messages to tell when the card is present.
2292 */
2293
Andrei Warkentinf06c9152011-04-21 22:46:13 -05002294 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
Ulf Hansson9aaf3432016-04-06 16:12:08 +02002295 "mmcblk%u%s", card->host->index, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00002296
Saugata Dasa5075eb2012-05-17 16:32:21 +05302297 if (mmc_card_mmc(card))
2298 blk_queue_logical_block_size(md->queue.queue,
2299 card->ext_csd.data_sector_size);
2300 else
2301 blk_queue_logical_block_size(md->queue.queue, 512);
2302
Andrei Warkentin371a6892011-04-11 18:10:25 -05002303 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002304
Andrei Warkentinf0d89972011-05-23 15:06:38 -05002305 if (mmc_host_cmd23(card->host)) {
2306 if (mmc_card_mmc(card) ||
2307 (mmc_card_sd(card) &&
2308 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2309 md->flags |= MMC_BLK_CMD23;
2310 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002311
2312 if (mmc_card_mmc(card) &&
2313 md->flags & MMC_BLK_CMD23 &&
2314 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2315 card->ext_csd.rel_sectors)) {
2316 md->flags |= MMC_BLK_REL_WR;
Jens Axboee9d5c742016-03-30 10:17:20 -06002317 blk_queue_write_cache(md->queue.queue, true, true);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002318 }
2319
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002320 if (mmc_card_mmc(card) &&
2321 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2322 (md->flags & MMC_BLK_CMD23) &&
2323 card->ext_csd.packed_event_en) {
2324 if (!mmc_packed_init(&md->queue, card))
2325 md->flags |= MMC_BLK_PACKED_CMD;
2326 }
2327
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00002329
2330 err_putdisk:
2331 put_disk(md->disk);
2332 err_kfree:
2333 kfree(md);
2334 out:
Ulf Hanssonb10fa992016-04-07 14:36:46 +02002335 spin_lock(&mmc_blk_lock);
2336 ida_remove(&mmc_blk_ida, devidx);
2337 spin_unlock(&mmc_blk_lock);
Russell Kinga6f6c962006-01-03 22:38:44 +00002338 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339}
2340
Andrei Warkentin371a6892011-04-11 18:10:25 -05002341static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2342{
2343 sector_t size;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002344
2345 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2346 /*
 2347	 * The EXT_CSD sector count is in number of 512 byte
2348 * sectors.
2349 */
2350 size = card->ext_csd.sectors;
2351 } else {
2352 /*
2353 * The CSD capacity field is in units of read_blkbits.
2354 * set_capacity takes units of 512 bytes.
2355 */
Kuninori Morimoto087de9e2015-05-11 07:35:28 +00002356 size = (typeof(sector_t))card->csd.capacity
2357 << (card->csd.read_blkbits - 9);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002358 }
2359
Tobias Klauser7a30f2a2015-01-21 15:56:44 +01002360 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002361 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002362}
2363
2364static int mmc_blk_alloc_part(struct mmc_card *card,
2365 struct mmc_blk_data *md,
2366 unsigned int part_type,
2367 sector_t size,
2368 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002369 const char *subname,
2370 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05002371{
2372 char cap_str[10];
2373 struct mmc_blk_data *part_md;
2374
2375 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002376 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002377 if (IS_ERR(part_md))
2378 return PTR_ERR(part_md);
2379 part_md->part_type = part_type;
2380 list_add(&part_md->part, &md->part);
2381
James Bottomleyb9f28d82015-03-05 18:47:01 -08002382 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
Andrei Warkentin371a6892011-04-11 18:10:25 -05002383 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05302384 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05002385 part_md->disk->disk_name, mmc_card_id(card),
2386 mmc_card_name(card), part_md->part_type, cap_str);
2387 return 0;
2388}
2389
Namjae Jeone0c368d2011-10-06 23:41:38 +09002390/* MMC Physical partitions consist of two boot partitions and
2391 * up to four general purpose partitions.
 2392	 * For each partition enabled in EXT_CSD a block device will be allocated
2393 * to provide access to the partition.
2394 */
2395
Andrei Warkentin371a6892011-04-11 18:10:25 -05002396static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2397{
Namjae Jeone0c368d2011-10-06 23:41:38 +09002398 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002399
2400 if (!mmc_card_mmc(card))
2401 return 0;
2402
Namjae Jeone0c368d2011-10-06 23:41:38 +09002403 for (idx = 0; idx < card->nr_parts; idx++) {
2404 if (card->part[idx].size) {
2405 ret = mmc_blk_alloc_part(card, md,
2406 card->part[idx].part_cfg,
2407 card->part[idx].size >> 9,
2408 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01002409 card->part[idx].name,
2410 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09002411 if (ret)
2412 return ret;
2413 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002414 }
2415
2416 return ret;
2417}
2418
Andrei Warkentin371a6892011-04-11 18:10:25 -05002419static void mmc_blk_remove_req(struct mmc_blk_data *md)
2420{
Johan Rudholmadd710e2011-12-02 08:51:06 +01002421 struct mmc_card *card;
2422
Andrei Warkentin371a6892011-04-11 18:10:25 -05002423 if (md) {
Paul Taysomfdfa20c2013-06-04 14:42:40 -07002424 /*
2425 * Flush remaining requests and free queues. It
2426 * is freeing the queue that stops new requests
2427 * from being accepted.
2428 */
Franck Jullien8efb83a2013-07-24 15:17:48 +02002429 card = md->queue.card;
Paul Taysomfdfa20c2013-06-04 14:42:40 -07002430 mmc_cleanup_queue(&md->queue);
2431 if (md->flags & MMC_BLK_PACKED_CMD)
2432 mmc_packed_clean(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002433 if (md->disk->flags & GENHD_FL_UP) {
2434 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01002435 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2436 card->ext_csd.boot_ro_lockable)
2437 device_remove_file(disk_to_dev(md->disk),
2438 &md->power_ro_lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002439
Andrei Warkentin371a6892011-04-11 18:10:25 -05002440 del_gendisk(md->disk);
2441 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002442 mmc_blk_put(md);
2443 }
2444}
2445
2446static void mmc_blk_remove_parts(struct mmc_card *card,
2447 struct mmc_blk_data *md)
2448{
2449 struct list_head *pos, *q;
2450 struct mmc_blk_data *part_md;
2451
2452 list_for_each_safe(pos, q, &md->part) {
2453 part_md = list_entry(pos, struct mmc_blk_data, part);
2454 list_del(pos);
2455 mmc_blk_remove_req(part_md);
2456 }
2457}
2458
2459static int mmc_add_disk(struct mmc_blk_data *md)
2460{
2461 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01002462 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05002463
Dan Williams307d8e62016-06-20 10:40:44 -07002464 device_add_disk(md->parent, md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002465 md->force_ro.show = force_ro_show;
2466 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05302467 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002468 md->force_ro.attr.name = "force_ro";
2469 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2470 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2471 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01002472 goto force_ro_fail;
2473
2474 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2475 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04002476 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01002477
2478 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2479 mode = S_IRUGO;
2480 else
2481 mode = S_IRUGO | S_IWUSR;
2482
2483 md->power_ro_lock.show = power_ro_lock_show;
2484 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01002485 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01002486 md->power_ro_lock.attr.mode = mode;
2487 md->power_ro_lock.attr.name =
2488 "ro_lock_until_next_power_on";
2489 ret = device_create_file(disk_to_dev(md->disk),
2490 &md->power_ro_lock);
2491 if (ret)
2492 goto power_ro_lock_fail;
2493 }
2494 return ret;
2495
2496power_ro_lock_fail:
2497 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2498force_ro_fail:
2499 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002500
2501 return ret;
2502}
2503
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002504static const struct mmc_fixup blk_fixups[] =
2505{
Chris Ballc59d4472011-11-11 22:01:43 -05002506 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2507 MMC_QUIRK_INAND_CMD38),
2508 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2509 MMC_QUIRK_INAND_CMD38),
2510 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2511 MMC_QUIRK_INAND_CMD38),
2512 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2513 MMC_QUIRK_INAND_CMD38),
2514 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2515 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002516
2517 /*
2518 * Some MMC cards experience performance degradation with CMD23
2519 * instead of CMD12-bounded multiblock transfers. For now we'll
2520 * black list what's bad...
2521 * - Certain Toshiba cards.
2522 *
2523 * N.B. This doesn't affect SD cards.
2524 */
Yangbo Lu7d70d472015-07-10 11:44:03 +08002525 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2526 MMC_QUIRK_BLK_NO_CMD23),
2527 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2528 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05002529 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002530 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05002531 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002532 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05002533 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002534 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002535
2536 /*
Matt Gumbel32ecd322016-05-20 10:33:46 +03002537	 * Some MMC cards need a longer data read timeout than indicated in CSD.
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002538 */
Chris Ballc59d4472011-11-11 22:01:43 -05002539 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002540 MMC_QUIRK_LONG_READ_TIME),
Matt Gumbel32ecd322016-05-20 10:33:46 +03002541 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2542 MMC_QUIRK_LONG_READ_TIME),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01002543
Ian Chen3550ccd2012-08-29 15:05:36 +09002544 /*
2545 * On these Samsung MoviNAND parts, performing secure erase or
2546 * secure trim can result in unrecoverable corruption due to a
2547 * firmware bug.
2548 */
2549 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2550 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2551 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2552 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2553 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2554 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2555 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2556 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2557 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2558 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2559 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2560 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2561 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2562 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2563 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2564 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2565
Shawn Linb5b4ff02015-08-12 13:08:32 +08002566 /*
 2567	 * On some Kingston eMMCs, performing trim can occasionally result in
 2568	 * unrecoverable data corruption due to a firmware bug.
2569 */
2570 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2571 MMC_QUIRK_TRIM_BROKEN),
2572 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2573 MMC_QUIRK_TRIM_BROKEN),
2574
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002575 END_FIXUP
2576};
2577
Ulf Hansson96541ba2015-04-14 13:06:12 +02002578static int mmc_blk_probe(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002580 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002581 char cap_str[10];
2582
Pierre Ossman912490d2005-05-21 10:27:02 +01002583 /*
2584 * Check that the card supports the command class(es) we need.
2585 */
2586 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 return -ENODEV;
2588
Lukas Czerner5204d002014-06-18 13:18:07 +02002589 mmc_fixup_device(card, blk_fixups);
2590
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 md = mmc_blk_alloc(card);
2592 if (IS_ERR(md))
2593 return PTR_ERR(md);
2594
James Bottomleyb9f28d82015-03-05 18:47:01 -08002595 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002596 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05302597 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02002599 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600
Andrei Warkentin371a6892011-04-11 18:10:25 -05002601 if (mmc_blk_alloc_parts(card, md))
2602 goto out;
2603
Ulf Hansson96541ba2015-04-14 13:06:12 +02002604 dev_set_drvdata(&card->dev, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04002605
Andrei Warkentin371a6892011-04-11 18:10:25 -05002606 if (mmc_add_disk(md))
2607 goto out;
2608
2609 list_for_each_entry(part_md, &md->part, part) {
2610 if (mmc_add_disk(part_md))
2611 goto out;
2612 }
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002613
2614 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2615 pm_runtime_use_autosuspend(&card->dev);
2616
2617 /*
2618 * Don't enable runtime PM for SD-combo cards here. Leave that
2619 * decision to be taken during the SDIO init sequence instead.
2620 */
2621 if (card->type != MMC_TYPE_SD_COMBO) {
2622 pm_runtime_set_active(&card->dev);
2623 pm_runtime_enable(&card->dev);
2624 }
2625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 return 0;
2627
2628 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05002629 mmc_blk_remove_parts(card, md);
2630 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01002631 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632}
2633
Ulf Hansson96541ba2015-04-14 13:06:12 +02002634static void mmc_blk_remove(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635{
Ulf Hansson96541ba2015-04-14 13:06:12 +02002636 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
Andrei Warkentin371a6892011-04-11 18:10:25 -05002638 mmc_blk_remove_parts(card, md);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002639 pm_runtime_get_sync(&card->dev);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03002640 mmc_claim_host(card->host);
2641 mmc_blk_part_switch(card, md);
2642 mmc_release_host(card->host);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002643 if (card->type != MMC_TYPE_SD_COMBO)
2644 pm_runtime_disable(&card->dev);
2645 pm_runtime_put_noidle(&card->dev);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002646 mmc_blk_remove_req(md);
Ulf Hansson96541ba2015-04-14 13:06:12 +02002647 dev_set_drvdata(&card->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648}
2649
Ulf Hansson96541ba2015-04-14 13:06:12 +02002650static int _mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002652 struct mmc_blk_data *part_md;
Ulf Hansson96541ba2015-04-14 13:06:12 +02002653 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
2655 if (md) {
2656 mmc_queue_suspend(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002657 list_for_each_entry(part_md, &md->part, part) {
2658 mmc_queue_suspend(&part_md->queue);
2659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 }
2661 return 0;
2662}
2663
Ulf Hansson96541ba2015-04-14 13:06:12 +02002664static void mmc_blk_shutdown(struct mmc_card *card)
Ulf Hansson76287742013-06-10 17:03:40 +02002665{
Ulf Hansson96541ba2015-04-14 13:06:12 +02002666 _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02002667}
2668
Ulf Hansson0967edc2014-10-06 11:29:42 +02002669#ifdef CONFIG_PM_SLEEP
2670static int mmc_blk_suspend(struct device *dev)
Ulf Hansson76287742013-06-10 17:03:40 +02002671{
Ulf Hansson96541ba2015-04-14 13:06:12 +02002672 struct mmc_card *card = mmc_dev_to_card(dev);
2673
2674 return _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02002675}
2676
Ulf Hansson0967edc2014-10-06 11:29:42 +02002677static int mmc_blk_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678{
Andrei Warkentin371a6892011-04-11 18:10:25 -05002679 struct mmc_blk_data *part_md;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02002680 struct mmc_blk_data *md = dev_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681
2682 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05002683 /*
2684 * Resume involves the card going into idle state,
 2685	 * so the current partition is always the main one.
2686 */
2687 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002689 list_for_each_entry(part_md, &md->part, part) {
2690 mmc_queue_resume(&part_md->queue);
2691 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 }
2693 return 0;
2694}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695#endif
2696
Ulf Hansson0967edc2014-10-06 11:29:42 +02002697static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2698
Ulf Hansson96541ba2015-04-14 13:06:12 +02002699static struct mmc_driver mmc_driver = {
2700 .drv = {
2701 .name = "mmcblk",
2702 .pm = &mmc_blk_pm_ops,
2703 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 .probe = mmc_blk_probe,
2705 .remove = mmc_blk_remove,
Ulf Hansson76287742013-06-10 17:03:40 +02002706 .shutdown = mmc_blk_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707};
2708
2709static int __init mmc_blk_init(void)
2710{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002711 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002713 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2714 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2715
Ben Hutchingsa26eba62014-11-06 03:35:09 +00002716 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
Olof Johansson5e71b7a2010-09-17 21:19:57 -04002717
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002718 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2719 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002722 res = mmc_register_driver(&mmc_driver);
2723 if (res)
2724 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09002726 return 0;
2727 out2:
2728 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 out:
2730 return res;
2731}
2732
2733static void __exit mmc_blk_exit(void)
2734{
2735 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02002736 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737}
2738
2739module_init(mmc_blk_init);
2740module_exit(mmc_blk_exit);
2741
2742MODULE_LICENSE("GPL");
2743MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2744