/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/ioprio.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
#include "block.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (30 * 1000)		/* 30 sec timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_CMDQ_STOP_TIMEOUT_MS 100

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
#define PACKED_TRIGGER_MAX_ELEMENTS	5000

#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)			\
	do {								\
		if (stats->enabled)					\
			stats->pack_stop_reason[reason]++;		\
	} while (0)

#define MAX_RETRIES 5
#define PCKD_TRGR_INIT_MEAN_POTEN	17
#define PCKD_TRGR_POTEN_LOWER_BOUND	5
#define PCKD_TRGR_URGENT_PENALTY	2
#define PCKD_TRGR_LOWER_BOUND		5
#define PCKD_TRGR_PRECISION_MULTIPLIER	100

static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
		struct mmc_queue_req *mqrq, struct mmc_queue *mq);
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
#define MMC_BLK_CMD_QUEUE	(1 << 3)	/* MMC command queue support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_FLUSH		BIT(4)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	struct device_attribute num_wr_reqs_to_start_packing;
	struct device_attribute no_pack_for_random;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
static int mmc_blk_cmdq_switch(struct mmc_card *card,
			       struct mmc_blk_data *md, bool enable);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int locked = 0;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	if (!md)
		return -EINVAL;
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	if (!md)
		return -EINVAL;

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (!md)
		return -EINVAL;

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static ssize_t
no_pack_for_random_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret;

	if (!md)
		return -EINVAL;
	ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);

	mmc_blk_put(md);
	return ret;
}

static ssize_t
no_pack_for_random_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int ret = count;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (!card) {
		ret = -EINVAL;
		goto exit;
	}

	sscanf(buf, "%d", &value);

	if (value < 0) {
		pr_err("%s: value %d is not valid. old value remains = %d",
			mmc_hostname(card->host), value,
			md->queue.no_pack_for_random);
		ret = -EINVAL;
		goto exit;
	}

	md->queue.no_pack_for_random = (value > 0) ? true : false;

	pr_debug("%s: no_pack_for_random: new value = %d",
		mmc_hostname(card->host),
		md->queue.no_pack_for_random);

exit:
	mmc_blk_put(md);
	return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int num_wr_reqs_to_start_packing;
	int ret;

	if (!md)
		return -EINVAL;
	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);

	mmc_blk_put(md);
	return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int ret = count;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (!card) {
		ret = -EINVAL;
		goto exit;
	}

	sscanf(buf, "%d", &value);

	if (value >= 0) {
		md->queue.num_wr_reqs_to_start_packing =
		    min_t(int, value, (int)card->ext_csd.max_packed_writes);

		pr_debug("%s: trigger to pack: new value = %d",
			mmc_hostname(card->host),
			md->queue.num_wr_reqs_to_start_packing);
	} else {
		pr_err("%s: value %d is not valid. old value remains = %d",
			mmc_hostname(card->host), value,
			md->queue.num_wr_reqs_to_start_packing);
		ret = -EINVAL;
	}

exit:
	mmc_blk_put(md);
	return ret;
}

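/*
 * When CONFIG_MMC_SIMULATE_MAX_SPEED is enabled, the module parameters
 * below cap the simulated read/write throughput and model a fast write
 * cache of cache_size MB; mmc_blk_simulate_delay() sleeps long enough to
 * enforce the configured limits.
 */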
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED

static int max_read_speed, max_write_speed, cache_size = 4;

module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
module_param(cache_size, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");

/*
 * helper macros and expectations:
 *   size    - unsigned long number of bytes
 *   jiffies - unsigned long HZ timestamp difference
 *   speed   - unsigned KB/s transfer rate
 */
#define size_and_speed_to_jiffies(size, speed) \
	((size) * HZ / (speed) / 1024UL)
#define jiffies_and_speed_to_size(jiffies, speed) \
	(((speed) * (jiffies) * 1024UL) / HZ)
#define jiffies_and_size_to_speed(jiffies, size) \
	((size) * HZ / (jiffies) / 1024UL)

/* Limits to report warning */
/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)

#define speed_valid(speed) ((speed) > 0)

static const char off[] = "off\n";

static int max_speed_show(int speed, char *buf)
{
	if (speed)
		return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
	else
		return scnprintf(buf, PAGE_SIZE, off);
}

static int max_speed_store(const char *buf, struct request_queue *q)
{
	unsigned int limit, set = 0;

	if (!strncasecmp(off, buf, sizeof(off) - 2))
		return set;
	if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
		return -EINVAL;
	if (set == 0)
		return set;
	limit = MAX_SPEED(q);
	if (set > limit)
		pr_warn("max speed %u ineffective above %u\n", set, limit);
	limit = MIN_SPEED(q);
	if (set < limit)
		pr_warn("max speed %u painful below %u\n", set, limit);
	return set;
}

static ssize_t max_write_speed_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);

	mmc_blk_put(md);
	return ret;
}

static ssize_t max_write_speed_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int set = max_speed_store(buf, md->queue.queue);

	if (set < 0) {
		mmc_blk_put(md);
		return set;
	}

	atomic_set(&md->queue.max_write_speed, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
	max_write_speed_show, max_write_speed_store);

static ssize_t max_read_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);

	mmc_blk_put(md);
	return ret;
}

static ssize_t max_read_speed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int set = max_speed_store(buf, md->queue.queue);

	if (set < 0) {
		mmc_blk_put(md);
		return set;
	}

	atomic_set(&md->queue.max_read_speed, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
	max_read_speed_show, max_read_speed_store);

static ssize_t cache_size_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_queue *mq = &md->queue;
	int cache_size = atomic_read(&mq->cache_size);
	int ret;

	if (!cache_size)
		ret = scnprintf(buf, PAGE_SIZE, off);
	else {
		int speed = atomic_read(&mq->max_write_speed);

		if (!speed_valid(speed))
			ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
		else { /* We accept race between cache_jiffies and cache_used */
			unsigned long size = jiffies_and_speed_to_size(
				jiffies - mq->cache_jiffies, speed);
			long used = atomic_long_read(&mq->cache_used);

			if (size >= used)
				size = 0;
			else
				size = (used - size) * 100 / cache_size
					/ 1024UL / 1024UL;

			ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
				cache_size, size);
		}
	}

	mmc_blk_put(md);
	return ret;
}

static ssize_t cache_size_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct mmc_blk_data *md;
	unsigned int set = 0;

	if (strncasecmp(off, buf, sizeof(off) - 2)
	 && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
		return -EINVAL;

	md = mmc_blk_get(dev_to_disk(dev));
	atomic_set(&md->queue.cache_size, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
	cache_size_show, cache_size_store);

/* correct for write-back */
static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
{
	long used = 0;
	int speed = atomic_read(&mq->max_write_speed);

	if (speed_valid(speed)) {
		unsigned long size = jiffies_and_speed_to_size(
			waitfor - mq->cache_jiffies, speed);
		used = atomic_long_read(&mq->cache_used);

		if (size >= used)
			used = 0;
		else
			used -= size;
	}

	atomic_long_set(&mq->cache_used, used);
	mq->cache_jiffies = waitfor;

	return used;
}

static void mmc_blk_simulate_delay(
	struct mmc_queue *mq,
	struct request *req,
	unsigned long waitfor)
{
	int max_speed;

	if (!req)
		return;

	max_speed = (rq_data_dir(req) == READ)
		? atomic_read(&mq->max_read_speed)
		: atomic_read(&mq->max_write_speed);
	if (speed_valid(max_speed)) {
		unsigned long bytes = blk_rq_bytes(req);

		if (rq_data_dir(req) != READ) {
			int cache_size = atomic_read(&mq->cache_size);

			if (cache_size) {
				unsigned long size = cache_size * 1024L * 1024L;
				long used = mmc_blk_cache_used(mq, waitfor);

				used += bytes;
				atomic_long_set(&mq->cache_used, used);
				bytes = 0;
				if (used > size)
					bytes = used - size;
			}
		}
		waitfor += size_and_speed_to_jiffies(bytes, max_speed);
		if (time_is_after_jiffies(waitfor)) {
			long msecs = jiffies_to_msecs(waitfor - jiffies);

			if (likely(msecs > 0))
				msleep(msecs);
		}
	}
}

#else

#define mmc_blk_simulate_delay(mq, req, waitfor)

#endif

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

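/*
 * Poll the card with CMD13 until the RPMB programming operation completes
 * (no error bits set and the card has left the PRG state), or until
 * retries_max attempts have been made.
 */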
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;

	if (!card || !md || !idata)
		return -EINVAL;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	return err;
}

struct mmc_blk_ioc_rpmb_data {
	struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
};

static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
	struct mmc_ioc_rpmb __user *user)
{
	struct mmc_blk_ioc_rpmb_data *idata;
	int err, i;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
		if (IS_ERR(idata->data[i])) {
			err = PTR_ERR(idata->data[i]);
			goto copy_err;
		}
	}

	return idata;

copy_err:
	while (--i >= 0) {
		kfree(idata->data[i]->buf);
		kfree(idata->data[i]);
	}
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
	struct mmc_ioc_rpmb __user *ic_ptr)
{
	struct mmc_blk_ioc_rpmb_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err = 0, i = 0;
	u32 status = 0;

	/* The caller must have CAP_SYS_RAWIO */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	md = mmc_blk_get(bdev->bd_disk);
	/* make sure this is an RPMB partition */
	if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) {
		err = -EINVAL;
		return err;
	}

	idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
	if (IS_ERR(idata)) {
		err = PTR_ERR(idata);
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto idata_free;
	}

	mmc_get_card(card);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		struct mmc_blk_ioc_data *curr_data;
		struct mmc_ioc_cmd *curr_cmd;

		curr_data = idata->data[i];
		curr_cmd = &curr_data->ic;
		if (!curr_cmd->opcode)
			break;

		cmd.opcode = curr_cmd->opcode;
		cmd.arg = curr_cmd->arg;
		cmd.flags = curr_cmd->flags;

		if (curr_data->buf_bytes) {
			data.sg = &sg;
			data.sg_len = 1;
			data.blksz = curr_cmd->blksz;
			data.blocks = curr_cmd->blocks;

			sg_init_one(data.sg, curr_data->buf,
				    curr_data->buf_bytes);

			if (curr_cmd->write_flag)
				data.flags = MMC_DATA_WRITE;
			else
				data.flags = MMC_DATA_READ;

			/* data.flags must already be set before doing this. */
			mmc_set_data_timeout(&data, card);

			/*
			 * Allow overriding the timeout_ns for empirical tuning.
			 */
			if (curr_cmd->data_timeout_ns)
				data.timeout_ns = curr_cmd->data_timeout_ns;

			mrq.data = &data;
		}

		mrq.cmd = &cmd;

		err = mmc_set_blockcount(card, data.blocks,
				curr_cmd->write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;

		mmc_wait_for_req(card->host, &mrq);

		if (cmd.error) {
			dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
				__func__, cmd.error);
			err = cmd.error;
			goto cmd_rel_host;
		}
		if (data.error) {
			dev_err(mmc_dev(card->host), "%s: data error %d\n",
				__func__, data.error);
			err = data.error;
			goto cmd_rel_host;
		}

		if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
				 sizeof(cmd.resp))) {
			err = -EFAULT;
			goto cmd_rel_host;
		}

		if (!curr_cmd->write_flag) {
			if (copy_to_user((void __user *)(unsigned long)
					 curr_cmd->data_ptr,
					 curr_data->buf,
					 curr_data->buf_bytes)) {
				err = -EFAULT;
				goto cmd_rel_host;
			}
		}

		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

cmd_rel_host:
	mmc_put_card(card);

idata_free:
	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		kfree(idata->data[i]->buf);
		kfree(idata->data[i]);
	}
	kfree(idata);

cmd_done:
	mmc_blk_put(md);
	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR_OR_NULL(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR_OR_NULL(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_RPMB_CMD:
		return mmc_blk_ioctl_rpmb_cmd(bdev,
				(struct mmc_ioc_rpmb __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

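/*
 * Enable or disable eMMC command queuing (EXT_CSD_CMDQ) on a card that
 * advertises CMDQ support; the block length is fixed to
 * MMC_CARD_CMDQ_BLK_SIZE before enabling.
 */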
static int mmc_blk_cmdq_switch(struct mmc_card *card,
			       struct mmc_blk_data *md, bool enable)
{
	int ret = 0;
	bool cmdq_mode = !!mmc_card_cmdq(card);

	if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
	    !card->ext_csd.cmdq_support ||
	    (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
	    (cmdq_mode == enable))
		return 0;

	if (enable) {
		ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
		if (ret) {
			pr_err("%s: failed (%d) to set block-size to %d\n",
			       __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
			goto out;
		}
	}

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_CMDQ, enable,
			 card->ext_csd.generic_cmd6_time);
	if (ret) {
		pr_err("%s: cmdq mode %sable failed %d\n",
		       md->disk->disk_name, enable ? "en" : "dis", ret);
		goto out;
	}

	if (enable)
		mmc_card_set_cmdq(card);
	else
		mmc_card_clr_cmdq(card);
out:
	return ret;
}

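/*
 * Select the hardware partition (user data, boot, RPMB, ...) that this
 * mmc_blk_data represents by programming EXT_CSD_PART_CONFIG, disabling
 * command queuing for non-user partitions and pausing re-tuning around
 * RPMB accesses.
 */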
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if ((main_md->part_curr == md->part_type) &&
	    (card->part_curr == md->part_type))
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type) {
			/* disable CQ mode for non-user data partitions */
			ret = mmc_blk_cmdq_switch(card, md, false);
			if (ret)
				return ret;
		}

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;
		card->part_curr = md->part_type;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}

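/*
 * Ask an SD card how many blocks were written without error
 * (ACMD22, SD_APP_SEND_NUM_WR_BLKS); returns (u32)-1 on any failure.
 */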
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

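/* Issue CMD13 (SEND_STATUS) and return the card status word in *status. */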
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

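/*
 * Poll the card until it reports READY_FOR_DATA and has left the PRG
 * state, or until timeout_ms expires.  When the host supports HW busy
 * detection and hw_busy_detect is set, a single status read is enough.
 */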
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, int *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = 1;
		}

		/* We may rely on the host hw to handle busy detection.*/
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
		    hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

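/*
 * Send CMD12 (STOP_TRANSMISSION) and, for writes, wait for the card to
 * become ready again; falls back to an R1 response when the requested
 * busy timeout exceeds what the host controller can handle.
 */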
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, int *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = 1;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err_ratelimited(
			"%s: response CRC error sending %s command, card status %#x\n",
			req->rq_disk->disk_name,
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err_ratelimited(
			"%s: timed out sending %s command, card status %#x\n",
			req->rq_disk->disk_name, name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err_ratelimited("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err_ratelimited(
				"%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		pr_err_ratelimited(
			"%s: not retrying timeout\n",
			req->rq_disk->disk_name);
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err_ratelimited(
			"%s: unknown error %d sending read/write command, card status %#x\n",
			req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001649 prev_cmd_status_valid = false;
1650 pr_err("%s: error %d sending status command, %sing\n",
1651 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1652 }
1653
1654 /* We couldn't get a response from the card. Give up. */
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301655 if (err) {
1656 /* Check if the card is removed */
1657 if (mmc_detect_card_removed(card->host))
1658 return ERR_NOMEDIUM;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001659 return ERR_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301660 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001661
Adrian Hunter67716322011-08-29 16:42:15 +03001662 /* Flag ECC errors */
1663 if ((status & R1_CARD_ECC_FAILED) ||
1664 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1665 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1666 *ecc_err = 1;
1667
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001668 /* Flag General errors */
1669 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1670 if ((status & R1_ERROR) ||
1671 (brq->stop.resp[0] & R1_ERROR)) {
1672 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1673 req->rq_disk->disk_name, __func__,
1674 brq->stop.resp[0], status);
1675 *gen_err = 1;
1676 }
1677
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001678 /*
1679 * Check the current card state. If it is in some data transfer
1680 * mode, tell it to stop (and hopefully transition back to TRAN.)
1681 */
1682 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1683 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001684 err = send_stop(card,
1685 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1686 req, gen_err, &stop_status);
1687 if (err) {
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001688 pr_err("%s: error %d sending stop command\n",
1689 req->rq_disk->disk_name, err);
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001690 /*
1691 * If the stop cmd also timed out, the card is probably
1692 * not present, so abort. Other errors are bad news too.
1693 */
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001694 return ERR_ABORT;
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001695 }
1696
Adrian Hunter67716322011-08-29 16:42:15 +03001697 if (stop_status & R1_CARD_ECC_FAILED)
1698 *ecc_err = 1;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001699 }
1700
1701 /* Check for set block count errors */
1702 if (brq->sbc.error)
1703 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1704 prev_cmd_status_valid, status);
1705
1706 /* Check for r/w command errors */
1707 if (brq->cmd.error)
1708 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1709 prev_cmd_status_valid, status);
1710
Adrian Hunter67716322011-08-29 16:42:15 +03001711 /* Data errors */
1712 if (!brq->stop.error)
1713 return ERR_CONTINUE;
1714
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001715 /* Now for stop errors. These aren't fatal to the transfer. */
Johan Rudholm5e1344e2014-09-17 09:50:42 +02001716 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001717 req->rq_disk->disk_name, brq->stop.error,
1718 brq->cmd.resp[0], status);
1719
1720 /*
 1721	 * Substitute in our own stop status as this will give the error
1722 * state which happened during the execution of the r/w command.
1723 */
1724 if (stop_status) {
1725 brq->stop.resp[0] = stop_status;
1726 brq->stop.error = 0;
1727 }
1728 return ERR_CONTINUE;
1729}
1730
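/*
 * Reset the card after an unrecoverable error. The reset_done bitmask
 * allows only one reset attempt per error type; after a successful reset
 * we switch back to the partition the request was targeting.
 */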
Adrian Hunter67716322011-08-29 16:42:15 +03001731static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1732 int type)
1733{
1734 int err;
1735
1736 if (md->reset_done & type)
1737 return -EEXIST;
1738
1739 md->reset_done |= type;
1740 err = mmc_hw_reset(host);
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301741 if (err && err != -EOPNOTSUPP) {
1742 /* We failed to reset so we need to abort the request */
1743 pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
1744 __func__, err);
1745 return -ENODEV;
1746 }
1747
Adrian Hunter67716322011-08-29 16:42:15 +03001748 /* Ensure we switch back to the correct partition */
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301749 if (host->card) {
Ulf Hanssonfc95e302014-10-06 14:34:09 +02001750 struct mmc_blk_data *main_md =
1751 dev_get_drvdata(&host->card->dev);
Adrian Hunter67716322011-08-29 16:42:15 +03001752 int part_err;
1753
1754 main_md->part_curr = main_md->part_type;
1755 part_err = mmc_blk_part_switch(host->card, md);
1756 if (part_err) {
1757 /*
1758 * We have failed to get back into the correct
1759 * partition, so we need to abort the whole request.
1760 */
1761 return -ENODEV;
1762 }
1763 }
1764 return err;
1765}
1766
1767static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1768{
1769 md->reset_done &= ~type;
1770}
1771
Chuanxiao Dong4e93b9a2014-08-12 12:01:30 +08001772int mmc_access_rpmb(struct mmc_queue *mq)
1773{
1774 struct mmc_blk_data *md = mq->data;
1775 /*
 1776	 * If this is an RPMB partition access, return true
1777 */
1778 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1779 return true;
1780
1781 return false;
1782}
1783
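/*
 * Prepare a DCMD (direct command) slot for a discard/erase request on a
 * command-queueing host: claim the request's tag, mark the DCMD state
 * active and build the mmc_cmdq_req with the QBR flag set.
 */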
Sahitya Tummala9433a132015-06-09 09:38:36 +05301784static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
1785 struct request *req)
1786{
1787 struct mmc_blk_data *md = mq->data;
1788 struct mmc_card *card = md->queue.card;
1789 struct mmc_host *host = card->host;
1790 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1791 struct mmc_cmdq_req *cmdq_req;
1792 struct mmc_queue_req *active_mqrq;
1793
1794 BUG_ON(req->tag > card->ext_csd.cmdq_depth);
1795 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
1796
1797 set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1798
1799 active_mqrq = &mq->mqrq_cmdq[req->tag];
1800 active_mqrq->req = req;
1801
1802 cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
1803 cmdq_req->cmdq_req_flags |= QBR;
1804 cmdq_req->mrq.cmd = &cmdq_req->cmd;
1805 cmdq_req->tag = req->tag;
1806 return cmdq_req;
1807}
1808
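/*
 * Issue a discard/trim/erase through the command queue. The INAND CMD38
 * quirk is handled with an extra switch DCMD before the erase itself; on
 * completion the tag and DCMD state are cleared and any starved requests
 * are re-run.
 */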
1809static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
1810 struct request *req)
1811{
1812 struct mmc_blk_data *md = mq->data;
1813 struct mmc_card *card = md->queue.card;
1814 struct mmc_cmdq_req *cmdq_req = NULL;
1815 struct mmc_host *host = card->host;
1816 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1817 unsigned int from, nr, arg;
1818 int err = 0;
1819
1820 if (!mmc_can_erase(card)) {
1821 err = -EOPNOTSUPP;
1822 goto out;
1823 }
1824
1825 from = blk_rq_pos(req);
1826 nr = blk_rq_sectors(req);
1827
1828 if (mmc_can_discard(card))
1829 arg = MMC_DISCARD_ARG;
1830 else if (mmc_can_trim(card))
1831 arg = MMC_TRIM_ARG;
1832 else
1833 arg = MMC_ERASE_ARG;
1834
1835 cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
1836 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1837 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1838 EXT_CSD_CMD_SET_NORMAL,
1839 INAND_CMD38_ARG_EXT_CSD,
1840 arg == MMC_TRIM_ARG ?
1841 INAND_CMD38_ARG_TRIM :
1842 INAND_CMD38_ARG_ERASE,
1843 0, true, false);
1844 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1845 if (err)
1846 goto clear_dcmd;
1847 }
1848 err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
1849clear_dcmd:
1850 /* clear pending request */
1851 if (cmdq_req) {
1852 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
1853 &ctx_info->active_reqs));
1854 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1855 }
1856out:
1857 blk_end_request(req, err, blk_rq_bytes(req));
1858
1859 if (test_and_clear_bit(0, &ctx_info->req_starved))
1860 blk_run_queue(mq->queue);
1861 mmc_release_host(host);
1862 return err ? 1 : 0;
1863}
1864
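/*
 * Legacy (non-CMDQ) discard path: pick the best erase argument the card
 * supports (discard, trim or erase), apply the INAND CMD38 quirk if
 * needed, and retry after a successful mmc_blk_reset() on -EIO.
 */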
Adrian Hunterbd788c92010-08-11 14:17:47 -07001865static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1866{
1867 struct mmc_blk_data *md = mq->data;
1868 struct mmc_card *card = md->queue.card;
1869 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001870 int err = 0, type = MMC_BLK_DISCARD;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001871
Adrian Hunterbd788c92010-08-11 14:17:47 -07001872 if (!mmc_can_erase(card)) {
1873 err = -EOPNOTSUPP;
1874 goto out;
1875 }
1876
1877 from = blk_rq_pos(req);
1878 nr = blk_rq_sectors(req);
1879
Kyungmin Parkb3bf9152011-10-18 09:34:04 +09001880 if (mmc_can_discard(card))
1881 arg = MMC_DISCARD_ARG;
1882 else if (mmc_can_trim(card))
Adrian Hunterbd788c92010-08-11 14:17:47 -07001883 arg = MMC_TRIM_ARG;
1884 else
1885 arg = MMC_ERASE_ARG;
Adrian Hunter67716322011-08-29 16:42:15 +03001886retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001887 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1888 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1889 INAND_CMD38_ARG_EXT_CSD,
1890 arg == MMC_TRIM_ARG ?
1891 INAND_CMD38_ARG_TRIM :
1892 INAND_CMD38_ARG_ERASE,
1893 0);
1894 if (err)
1895 goto out;
1896 }
Adrian Hunterbd788c92010-08-11 14:17:47 -07001897 err = mmc_erase(card, from, nr, arg);
1898out:
Adrian Hunter67716322011-08-29 16:42:15 +03001899 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1900 goto retry;
1901 if (!err)
1902 mmc_blk_reset_success(md, type);
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301903 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunterbd788c92010-08-11 14:17:47 -07001904
Adrian Hunterbd788c92010-08-11 14:17:47 -07001905 return err ? 0 : 1;
1906}
1907
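/*
 * CMDQ variant of secure discard: the same SECURE_TRIM1/TRIM2 or secure
 * erase sequence as the legacy path below, but each step is issued as a
 * DCMD through the command queue.
 */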
Sahitya Tummala9433a132015-06-09 09:38:36 +05301908static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
1909 struct request *req)
1910{
1911 struct mmc_blk_data *md = mq->data;
1912 struct mmc_card *card = md->queue.card;
1913 struct mmc_cmdq_req *cmdq_req = NULL;
1914 unsigned int from, nr, arg;
1915 struct mmc_host *host = card->host;
1916 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1917 int err = 0;
1918
1919 if (!(mmc_can_secure_erase_trim(card))) {
1920 err = -EOPNOTSUPP;
1921 goto out;
1922 }
1923
1924 from = blk_rq_pos(req);
1925 nr = blk_rq_sectors(req);
1926
1927 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1928 arg = MMC_SECURE_TRIM1_ARG;
1929 else
1930 arg = MMC_SECURE_ERASE_ARG;
1931
1932 cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
1933 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1934 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1935 EXT_CSD_CMD_SET_NORMAL,
1936 INAND_CMD38_ARG_EXT_CSD,
1937 arg == MMC_SECURE_TRIM1_ARG ?
1938 INAND_CMD38_ARG_SECTRIM1 :
1939 INAND_CMD38_ARG_SECERASE,
1940 0, true, false);
1941 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1942 if (err)
1943 goto clear_dcmd;
1944 }
1945
1946 err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
1947 if (err)
1948 goto clear_dcmd;
1949
1950 if (arg == MMC_SECURE_TRIM1_ARG) {
1951 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1952 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1953 EXT_CSD_CMD_SET_NORMAL,
1954 INAND_CMD38_ARG_EXT_CSD,
1955 INAND_CMD38_ARG_SECTRIM2,
1956 0, true, false);
1957 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1958 if (err)
1959 goto clear_dcmd;
1960 }
1961
1962 err = mmc_cmdq_erase(cmdq_req, card, from, nr,
1963 MMC_SECURE_TRIM2_ARG);
1964 }
1965clear_dcmd:
1966 /* clear pending request */
1967 if (cmdq_req) {
1968 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
1969 &ctx_info->active_reqs));
1970 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1971 }
1972out:
1973 blk_end_request(req, err, blk_rq_bytes(req));
1974
1975 if (test_and_clear_bit(0, &ctx_info->req_starved))
1976 blk_run_queue(mq->queue);
1977 mmc_release_host(host);
1978 return err ? 1 : 0;
1979}
1980
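/*
 * Legacy secure discard path: use SECURE_TRIM1/TRIM2 when the range is
 * not erase-group aligned and trim is supported, otherwise a secure
 * erase; errors trigger a reset-and-retry like the plain discard path.
 */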
Adrian Hunter49804542010-08-11 14:17:50 -07001981static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1982 struct request *req)
1983{
1984 struct mmc_blk_data *md = mq->data;
1985 struct mmc_card *card = md->queue.card;
Maya Erez775a9362013-04-18 15:41:55 +03001986 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001987 int err = 0, type = MMC_BLK_SECDISCARD;
Adrian Hunter49804542010-08-11 14:17:50 -07001988
Maya Erez775a9362013-04-18 15:41:55 +03001989 if (!(mmc_can_secure_erase_trim(card))) {
Adrian Hunter49804542010-08-11 14:17:50 -07001990 err = -EOPNOTSUPP;
1991 goto out;
1992 }
1993
1994 from = blk_rq_pos(req);
1995 nr = blk_rq_sectors(req);
1996
Maya Erez775a9362013-04-18 15:41:55 +03001997 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1998 arg = MMC_SECURE_TRIM1_ARG;
1999 else
2000 arg = MMC_SECURE_ERASE_ARG;
Adrian Hunter28302812012-04-05 14:45:48 +03002001
Adrian Hunter67716322011-08-29 16:42:15 +03002002retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002003 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2004 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2005 INAND_CMD38_ARG_EXT_CSD,
2006 arg == MMC_SECURE_TRIM1_ARG ?
2007 INAND_CMD38_ARG_SECTRIM1 :
2008 INAND_CMD38_ARG_SECERASE,
2009 0);
2010 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03002011 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002012 }
Adrian Hunter28302812012-04-05 14:45:48 +03002013
Adrian Hunter49804542010-08-11 14:17:50 -07002014 err = mmc_erase(card, from, nr, arg);
Adrian Hunter28302812012-04-05 14:45:48 +03002015 if (err == -EIO)
2016 goto out_retry;
2017 if (err)
2018 goto out;
2019
2020 if (arg == MMC_SECURE_TRIM1_ARG) {
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002021 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2022 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2023 INAND_CMD38_ARG_EXT_CSD,
2024 INAND_CMD38_ARG_SECTRIM2,
2025 0);
2026 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03002027 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002028 }
Adrian Hunter28302812012-04-05 14:45:48 +03002029
Adrian Hunter49804542010-08-11 14:17:50 -07002030 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
Adrian Hunter28302812012-04-05 14:45:48 +03002031 if (err == -EIO)
2032 goto out_retry;
2033 if (err)
2034 goto out;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002035 }
Adrian Hunter28302812012-04-05 14:45:48 +03002036
Adrian Hunter28302812012-04-05 14:45:48 +03002037out_retry:
2038 if (err && !mmc_blk_reset(md, card->host, type))
Adrian Hunter67716322011-08-29 16:42:15 +03002039 goto retry;
2040 if (!err)
2041 mmc_blk_reset_success(md, type);
Adrian Hunter28302812012-04-05 14:45:48 +03002042out:
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302043 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunter49804542010-08-11 14:17:50 -07002044
Adrian Hunter49804542010-08-11 14:17:50 -07002045 return err ? 0 : 1;
2046}
2047
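/*
 * Flush the card's internal cache. On -ENODEV a card reset is attempted
 * so that later requests can proceed, but the flush failure itself is
 * still reported to the block layer as -EIO.
 */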
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002048static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
2049{
2050 struct mmc_blk_data *md = mq->data;
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002051 struct mmc_card *card = md->queue.card;
2052 int ret = 0;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002053
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002054 ret = mmc_flush_cache(card);
Talel Shenhar8a8e3b42015-02-11 12:58:16 +02002055 if (ret == -ENODEV) {
2056 pr_err("%s: %s: restart mmc card",
2057 req->rq_disk->disk_name, __func__);
2058 if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
2059 pr_err("%s: %s: fail to restart mmc",
2060 req->rq_disk->disk_name, __func__);
2061 else
2062 mmc_blk_reset_success(md, MMC_BLK_FLUSH);
2063 }
2064
2065 if (ret) {
2066 pr_err("%s: %s: notify flush error to upper layers",
2067 req->rq_disk->disk_name, __func__);
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002068 ret = -EIO;
Talel Shenhar8a8e3b42015-02-11 12:58:16 +02002069 }
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002070
Mark Salyzyn6904e432016-01-28 11:12:25 -08002071#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2072 else if (atomic_read(&mq->cache_size)) {
2073 long used = mmc_blk_cache_used(mq, jiffies);
2074
2075 if (used) {
2076 int speed = atomic_read(&mq->max_write_speed);
2077
2078 if (speed_valid(speed)) {
2079 unsigned long msecs = jiffies_to_msecs(
2080 size_and_speed_to_jiffies(
2081 used, speed));
2082 if (msecs)
2083 msleep(msecs);
2084 }
2085 }
2086 }
2087#endif
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302088 blk_end_request_all(req, ret);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002089
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002090 return ret ? 0 : 1;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002091}
2092
2093/*
 2094 * Reformat the current write as a reliable write, supporting
 2095 * both legacy and enhanced reliable-write MMC cards.
 2096 * In each transfer we'll handle only as much as a single
 2097 * reliable write can handle, so the request is finished in
 2098 * partial completions.
2099 */
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002100static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
2101 struct mmc_card *card,
2102 struct request *req)
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002103{
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002104 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
2105 /* Legacy mode imposes restrictions on transfers. */
2106 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
2107 brq->data.blocks = 1;
2108
2109 if (brq->data.blocks > card->ext_csd.rel_sectors)
2110 brq->data.blocks = card->ext_csd.rel_sectors;
2111 else if (brq->data.blocks < card->ext_csd.rel_sectors)
2112 brq->data.blocks = 1;
2113 }
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002114}
2115
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002116#define CMD_ERRORS \
2117 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
2118 R1_ADDRESS_ERROR | /* Misaligned address */ \
2119 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
2120 R1_WP_VIOLATION | /* Tried to write to protected block */ \
2121 R1_CC_ERROR | /* Card controller error */ \
2122 R1_ERROR) /* General/unknown error */
2123
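/*
 * Post-process a completed asynchronous request: run the command error
 * recovery above when any of sbc/cmd/stop/data reported a problem, wait
 * for writes to leave the programming state, and translate the outcome
 * into an MMC_BLK_* status for the request handling state machine.
 */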
Per Forlinee8a43a2011-07-01 18:55:33 +02002124static int mmc_blk_err_check(struct mmc_card *card,
2125 struct mmc_async_req *areq)
Per Forlind78d4a82011-07-01 18:55:30 +02002126{
Per Forlinee8a43a2011-07-01 18:55:33 +02002127 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
2128 mmc_active);
2129 struct mmc_blk_request *brq = &mq_mrq->brq;
2130 struct request *req = mq_mrq->req;
Adrian Hunterb8360a42015-05-07 13:10:24 +03002131 int need_retune = card->host->need_retune;
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002132 int ecc_err = 0, gen_err = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02002133
2134 /*
2135 * sbc.error indicates a problem with the set block count
2136 * command. No data will have been transferred.
2137 *
2138 * cmd.error indicates a problem with the r/w command. No
2139 * data will have been transferred.
2140 *
2141 * stop.error indicates a problem with the stop command. Data
2142 * may have been transferred, or may still be transferring.
2143 */
Adrian Hunter67716322011-08-29 16:42:15 +03002144 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
2145 brq->data.error) {
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002146 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
Per Forlind78d4a82011-07-01 18:55:30 +02002147 case ERR_RETRY:
2148 return MMC_BLK_RETRY;
2149 case ERR_ABORT:
2150 return MMC_BLK_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05302151 case ERR_NOMEDIUM:
2152 return MMC_BLK_NOMEDIUM;
Per Forlind78d4a82011-07-01 18:55:30 +02002153 case ERR_CONTINUE:
2154 break;
2155 }
2156 }
2157
2158 /*
2159 * Check for errors relating to the execution of the
2160 * initial command - such as address errors. No data
2161 * has been transferred.
2162 */
2163 if (brq->cmd.resp[0] & CMD_ERRORS) {
2164 pr_err("%s: r/w command failed, status = %#x\n",
2165 req->rq_disk->disk_name, brq->cmd.resp[0]);
2166 return MMC_BLK_ABORT;
2167 }
2168
2169 /*
2170 * Everything else is either success, or a data error of some
2171 * kind. If it was a write, we may have transitioned to
 2172	 * program mode, which we have to wait for to complete.
2173 */
2174 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
Ulf Hanssonc49433f2014-01-29 11:01:55 +01002175 int err;
Trey Ramsay8fee4762012-11-16 09:31:41 -06002176
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002177 /* Check stop command response */
2178 if (brq->stop.resp[0] & R1_ERROR) {
2179 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
2180 req->rq_disk->disk_name, __func__,
2181 brq->stop.resp[0]);
2182 gen_err = 1;
2183 }
2184
Ulf Hansson95a91292014-01-29 13:11:27 +01002185 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
2186 &gen_err);
Ulf Hanssonc49433f2014-01-29 11:01:55 +01002187 if (err)
2188 return MMC_BLK_CMD_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02002189 }
2190
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002191 /* if general error occurs, retry the write operation. */
2192 if (gen_err) {
2193 pr_warn("%s: retrying write for general error\n",
2194 req->rq_disk->disk_name);
2195 return MMC_BLK_RETRY;
2196 }
2197
Per Forlind78d4a82011-07-01 18:55:30 +02002198 if (brq->data.error) {
Adrian Hunterb8360a42015-05-07 13:10:24 +03002199 if (need_retune && !brq->retune_retry_done) {
Russell King09faf612016-01-29 09:44:00 +00002200 pr_debug("%s: retrying because a re-tune was needed\n",
2201 req->rq_disk->disk_name);
Adrian Hunterb8360a42015-05-07 13:10:24 +03002202 brq->retune_retry_done = 1;
2203 return MMC_BLK_RETRY;
2204 }
Per Forlind78d4a82011-07-01 18:55:30 +02002205 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
2206 req->rq_disk->disk_name, brq->data.error,
2207 (unsigned)blk_rq_pos(req),
2208 (unsigned)blk_rq_sectors(req),
2209 brq->cmd.resp[0], brq->stop.resp[0]);
2210
2211 if (rq_data_dir(req) == READ) {
Adrian Hunter67716322011-08-29 16:42:15 +03002212 if (ecc_err)
2213 return MMC_BLK_ECC_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02002214 return MMC_BLK_DATA_ERR;
2215 } else {
2216 return MMC_BLK_CMD_ERR;
2217 }
2218 }
2219
Adrian Hunter67716322011-08-29 16:42:15 +03002220 if (!brq->data.bytes_xfered)
2221 return MMC_BLK_RETRY;
Per Forlind78d4a82011-07-01 18:55:30 +02002222
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002223 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
2224 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
2225 return MMC_BLK_PARTIAL;
2226 else
2227 return MMC_BLK_SUCCESS;
2228 }
2229
Adrian Hunter67716322011-08-29 16:42:15 +03002230 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
2231 return MMC_BLK_PARTIAL;
2232
2233 return MMC_BLK_SUCCESS;
Per Forlind78d4a82011-07-01 18:55:30 +02002234}
2235
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002236static int mmc_blk_packed_err_check(struct mmc_card *card,
2237 struct mmc_async_req *areq)
2238{
2239 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
2240 mmc_active);
2241 struct request *req = mq_rq->req;
2242 struct mmc_packed *packed = mq_rq->packed;
2243 int err, check, status;
2244 u8 *ext_csd;
2245
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002246 packed->retries--;
2247 check = mmc_blk_err_check(card, areq);
2248 err = get_card_status(card, &status, 0);
2249 if (err) {
2250 pr_err("%s: error %d sending status command\n",
2251 req->rq_disk->disk_name, err);
2252 return MMC_BLK_ABORT;
2253 }
2254
2255 if (status & R1_EXCEPTION_EVENT) {
Ulf Hansson86817ff2014-10-17 11:39:05 +02002256 err = mmc_get_ext_csd(card, &ext_csd);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002257 if (err) {
2258 pr_err("%s: error %d sending ext_csd\n",
2259 req->rq_disk->disk_name, err);
Ulf Hansson86817ff2014-10-17 11:39:05 +02002260 return MMC_BLK_ABORT;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002261 }
2262
2263 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
2264 EXT_CSD_PACKED_FAILURE) &&
2265 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2266 EXT_CSD_PACKED_GENERIC_ERROR)) {
2267 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2268 EXT_CSD_PACKED_INDEXED_ERROR) {
2269 packed->idx_failure =
2270 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
2271 check = MMC_BLK_PARTIAL;
2272 }
2273 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
2274 "failure index: %d\n",
2275 req->rq_disk->disk_name, packed->nr_entries,
2276 packed->blocks, packed->idx_failure);
2277 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002278 kfree(ext_csd);
2279 }
2280
2281 return check;
2282}
2283
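/*
 * Build the mmc_blk_request for an ordinary read or write: pick single
 * vs. multiple block commands, clamp the block count to the host limits,
 * apply reliable-write and data-tag rules, optionally bound the transfer
 * with CMD23, and trim the scatterlist to the request size.
 */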
Per Forlin54d49d72011-07-01 18:55:29 +02002284static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
2285 struct mmc_card *card,
2286 int disable_multi,
2287 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288{
Per Forlin54d49d72011-07-01 18:55:29 +02002289 u32 readcmd, writecmd;
2290 struct mmc_blk_request *brq = &mqrq->brq;
2291 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 struct mmc_blk_data *md = mq->data;
Saugata Das42659002011-12-21 13:09:17 +05302293 bool do_data_tag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002295 /*
2296 * Reliable writes are used to implement Forced Unit Access and
Luca Porziod3df0462015-11-06 15:12:26 +00002297 * are supported only on MMCs.
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002298 */
Luca Porziod3df0462015-11-06 15:12:26 +00002299 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002300 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002301 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002302
Per Forlin54d49d72011-07-01 18:55:29 +02002303 memset(brq, 0, sizeof(struct mmc_blk_request));
2304 brq->mrq.cmd = &brq->cmd;
2305 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Per Forlin54d49d72011-07-01 18:55:29 +02002307 brq->cmd.arg = blk_rq_pos(req);
2308 if (!mmc_card_blockaddr(card))
2309 brq->cmd.arg <<= 9;
2310 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2311 brq->data.blksz = 512;
2312 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2313 brq->stop.arg = 0;
Per Forlin54d49d72011-07-01 18:55:29 +02002314 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Asutosh Dasf0665412012-07-27 18:10:19 +05302316 brq->data.fault_injected = false;
Per Forlin54d49d72011-07-01 18:55:29 +02002317 /*
2318 * The block layer doesn't support all sector count
 2319	 * restrictions, so we need to be prepared for requests that
 2320	 * are too big.
2321 */
2322 if (brq->data.blocks > card->host->max_blk_count)
2323 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002325 if (brq->data.blocks > 1) {
2326 /*
2327 * After a read error, we redo the request one sector
2328 * at a time in order to accurately determine which
2329 * sectors can be read successfully.
2330 */
2331 if (disable_multi)
2332 brq->data.blocks = 1;
2333
Kuninori Morimoto2e47e842014-09-02 19:08:53 -07002334 /*
2335 * Some controllers have HW issues while operating
2336 * in multiple I/O mode
2337 */
2338 if (card->host->ops->multi_io_quirk)
2339 brq->data.blocks = card->host->ops->multi_io_quirk(card,
2340 (rq_data_dir(req) == READ) ?
2341 MMC_DATA_READ : MMC_DATA_WRITE,
2342 brq->data.blocks);
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002343 }
Per Forlin54d49d72011-07-01 18:55:29 +02002344
2345 if (brq->data.blocks > 1 || do_rel_wr) {
2346 /* SPI multiblock writes terminate using a special
2347 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +02002348 */
Per Forlin54d49d72011-07-01 18:55:29 +02002349 if (!mmc_host_is_spi(card->host) ||
2350 rq_data_dir(req) == READ)
2351 brq->mrq.stop = &brq->stop;
2352 readcmd = MMC_READ_MULTIPLE_BLOCK;
2353 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
2354 } else {
2355 brq->mrq.stop = NULL;
2356 readcmd = MMC_READ_SINGLE_BLOCK;
2357 writecmd = MMC_WRITE_BLOCK;
2358 }
2359 if (rq_data_dir(req) == READ) {
2360 brq->cmd.opcode = readcmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002361 brq->data.flags = MMC_DATA_READ;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002362 if (brq->mrq.stop)
2363 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
2364 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002365 } else {
2366 brq->cmd.opcode = writecmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002367 brq->data.flags = MMC_DATA_WRITE;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002368 if (brq->mrq.stop)
2369 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
2370 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002371 }
Pierre Ossman548d2de2009-04-10 17:52:57 +02002372
Per Forlin54d49d72011-07-01 18:55:29 +02002373 if (do_rel_wr)
2374 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +01002375
Per Forlin54d49d72011-07-01 18:55:29 +02002376 /*
Saugata Das42659002011-12-21 13:09:17 +05302377	 * The data tag is used only when writing metadata, to speed
 2378	 * up the write and any subsequent read of this metadata
2379 */
2380 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2381 (req->cmd_flags & REQ_META) &&
2382 (rq_data_dir(req) == WRITE) &&
2383 ((brq->data.blocks * brq->data.blksz) >=
2384 card->ext_csd.data_tag_unit_size);
2385
2386 /*
Per Forlin54d49d72011-07-01 18:55:29 +02002387 * Pre-defined multi-block transfers are preferable to
2388 * open ended-ones (and necessary for reliable writes).
2389 * However, it is not sufficient to just send CMD23,
2390 * and avoid the final CMD12, as on an error condition
2391 * CMD12 (stop) needs to be sent anyway. This, coupled
2392 * with Auto-CMD23 enhancements provided by some
2393 * hosts, means that the complexity of dealing
2394 * with this is best left to the host. If CMD23 is
2395 * supported by card and host, we'll fill sbc in and let
2396 * the host deal with handling it correctly. This means
2397 * that for hosts that don't expose MMC_CAP_CMD23, no
2398 * change of behavior will be observed.
2399 *
 2400	 * N.B.: Some MMC cards experience performance degradation.
2401 * We'll avoid using CMD23-bounded multiblock writes for
2402 * these, while retaining features like reliable writes.
2403 */
Saugata Das42659002011-12-21 13:09:17 +05302404 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
2405 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
2406 do_data_tag)) {
Per Forlin54d49d72011-07-01 18:55:29 +02002407 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2408 brq->sbc.arg = brq->data.blocks |
Saugata Das42659002011-12-21 13:09:17 +05302409 (do_rel_wr ? (1 << 31) : 0) |
2410 (do_data_tag ? (1 << 29) : 0);
Per Forlin54d49d72011-07-01 18:55:29 +02002411 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2412 brq->mrq.sbc = &brq->sbc;
2413 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002414
Per Forlin54d49d72011-07-01 18:55:29 +02002415 mmc_set_data_timeout(&brq->data, card);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002416
Per Forlin54d49d72011-07-01 18:55:29 +02002417 brq->data.sg = mqrq->sg;
2418 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002419
Per Forlin54d49d72011-07-01 18:55:29 +02002420 /*
2421 * Adjust the sg list so it is the same size as the
2422 * request.
2423 */
2424 if (brq->data.blocks != blk_rq_sectors(req)) {
2425 int i, data_size = brq->data.blocks << 9;
2426 struct scatterlist *sg;
Pierre Ossmanb146d262007-07-24 19:16:54 +02002427
Per Forlin54d49d72011-07-01 18:55:29 +02002428 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
2429 data_size -= sg->length;
2430 if (data_size <= 0) {
2431 sg->length += data_size;
2432 i++;
2433 break;
Adrian Hunter6a79e392008-12-31 18:21:17 +01002434 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002435 }
Per Forlin54d49d72011-07-01 18:55:29 +02002436 brq->data.sg_len = i;
2437 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002438
Per Forlinee8a43a2011-07-01 18:55:33 +02002439 mqrq->mmc_active.mrq = &brq->mrq;
Sahitya Tummalac44de842015-05-08 11:12:30 +05302440 mqrq->mmc_active.mrq->req = mqrq->req;
Per Forlinee8a43a2011-07-01 18:55:33 +02002441 mqrq->mmc_active.err_check = mmc_blk_err_check;
2442
Per Forlin54d49d72011-07-01 18:55:29 +02002443 mmc_queue_bounce_pre(mqrq);
2444}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
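/*
 * Work out how many scatterlist segments the packed command header
 * (512 bytes, or 4KB for large-sector cards) will need, given the
 * queue's maximum segment size.
 */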
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002446static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
2447 struct mmc_card *card)
2448{
2449 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
2450 unsigned int max_seg_sz = queue_max_segment_size(q);
2451 unsigned int len, nr_segs = 0;
2452
2453 do {
2454 len = min(hdr_sz, max_seg_sz);
2455 hdr_sz -= len;
2456 nr_segs++;
2457 } while (hdr_sz);
2458
2459 return nr_segs;
2460}
2461
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002462/**
2463 * mmc_blk_disable_wr_packing() - disables packing mode
2464 * @mq: MMC queue.
2465 *
2466 */
2467void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
2468{
2469 if (mq) {
2470 mq->wr_packing_enabled = false;
2471 mq->num_of_potential_packed_wr_reqs = 0;
2472 }
2473}
2474EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
2475
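/*
 * Adaptive trigger for write packing: keep a running mean of the number
 * of potential packed writes and lower the trigger when the current
 * potential exceeds that mean (expecting a write-heavy workload), raise
 * it otherwise. Urgent reads push the trigger up to discourage packing.
 */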
Lee Susman841fd132013-04-23 17:59:26 +03002476static int get_packed_trigger(int potential, struct mmc_card *card,
2477 struct request *req, int curr_trigger)
2478{
2479 static int num_mean_elements = 1;
2480 static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2481 unsigned int trigger = curr_trigger;
2482 unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
2483
2484 /* scale down the upper bound to 75% */
2485 pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
2486
2487 /*
2488 * since the most common calls for this function are with small
2489 * potential write values and since we don't want these calls to affect
2490 * the packed trigger, set a lower bound and ignore calls with
2491 * potential lower than that bound
2492 */
2493 if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
2494 return trigger;
2495
2496 /*
2497 * this is to prevent integer overflow in the following calculation:
2498 * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
2499 */
2500 if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
2501 num_mean_elements = 1;
2502 mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2503 }
2504
2505 /*
2506 * get next mean value based on previous mean value and current
2507 * potential packed writes. Calculation is as follows:
2508 * mean_pot[i+1] =
2509 * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
2510 */
2511 mean_potential *= num_mean_elements;
2512 /*
2513 * add num_mean_elements so that the division of two integers doesn't
2514 * lower mean_potential too much
2515 */
2516 if (potential > mean_potential)
2517 mean_potential += num_mean_elements;
2518 mean_potential += potential;
2519 /* this is for gaining more precision when dividing two integers */
2520 mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
2521 /* this completes the mean calculation */
2522 mean_potential /= ++num_mean_elements;
2523 mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
2524
2525 /*
 2526	 * if the current number of potential packed writes is greater than the
 2527	 * mean potential, the heuristic is that the following workload will contain many
2528 * write requests, therefore we lower the packed trigger. In the
2529 * opposite case we want to increase the trigger in order to get less
2530 * packing events.
2531 */
2532 if (potential >= mean_potential)
2533 trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
2534 PCKD_TRGR_LOWER_BOUND : trigger - 1;
2535 else
2536 trigger = (trigger >= pckd_trgr_upper_bound) ?
2537 pckd_trgr_upper_bound : trigger + 1;
2538
2539 /*
2540 * an urgent read request indicates a packed list being interrupted
2541 * by this read, therefore we aim for less packing, hence the trigger
2542 * gets increased
2543 */
2544 if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
2545 trigger += PCKD_TRGR_URGENT_PENALTY;
2546
2547 return trigger;
2548}
2549
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002550static void mmc_blk_write_packing_control(struct mmc_queue *mq,
2551 struct request *req)
2552{
2553 struct mmc_host *host = mq->card->host;
2554 int data_dir;
2555
2556 if (!(host->caps2 & MMC_CAP2_PACKED_WR))
2557 return;
2558
Maya Erez8e2b3c32012-12-02 13:27:15 +02002559 /* Support for the write packing on eMMC 4.5 or later */
2560 if (mq->card->ext_csd.rev <= 5)
2561 return;
2562
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002563 /*
2564 * In case the packing control is not supported by the host, it should
2565 * not have an effect on the write packing. Therefore we have to enable
2566 * the write packing
2567 */
2568 if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
2569 mq->wr_packing_enabled = true;
2570 return;
2571 }
2572
2573 if (!req || (req && (req->cmd_flags & REQ_PREFLUSH))) {
2574 if (mq->num_of_potential_packed_wr_reqs >
2575 mq->num_wr_reqs_to_start_packing)
2576 mq->wr_packing_enabled = true;
Lee Susman841fd132013-04-23 17:59:26 +03002577 mq->num_wr_reqs_to_start_packing =
2578 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2579 mq->card, req,
2580 mq->num_wr_reqs_to_start_packing);
Tatyana Brokhman843915a2012-10-07 10:26:27 +02002581 mq->num_of_potential_packed_wr_reqs = 0;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002582 return;
2583 }
2584
2585 data_dir = rq_data_dir(req);
2586
2587 if (data_dir == READ) {
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002588 mmc_blk_disable_wr_packing(mq);
Lee Susman841fd132013-04-23 17:59:26 +03002589 mq->num_wr_reqs_to_start_packing =
2590 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2591 mq->card, req,
2592 mq->num_wr_reqs_to_start_packing);
2593 mq->num_of_potential_packed_wr_reqs = 0;
2594 mq->wr_packing_enabled = false;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002595 return;
2596 } else if (data_dir == WRITE) {
2597 mq->num_of_potential_packed_wr_reqs++;
2598 }
2599
2600 if (mq->num_of_potential_packed_wr_reqs >
2601 mq->num_wr_reqs_to_start_packing)
2602 mq->wr_packing_enabled = true;
2603}
2604
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002605struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
2606{
2607 if (!card)
2608 return NULL;
2609
2610 return &card->wr_pack_stats;
2611}
2612EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
2613
2614void mmc_blk_init_packed_statistics(struct mmc_card *card)
2615{
2616 int max_num_of_packed_reqs = 0;
2617
2618 if (!card || !card->wr_pack_stats.packing_events)
2619 return;
2620
2621 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
2622
2623 spin_lock(&card->wr_pack_stats.lock);
2624 memset(card->wr_pack_stats.packing_events, 0,
2625 (max_num_of_packed_reqs + 1) *
2626 sizeof(*card->wr_pack_stats.packing_events));
2627 memset(&card->wr_pack_stats.pack_stop_reason, 0,
2628 sizeof(card->wr_pack_stats.pack_stop_reason));
2629 card->wr_pack_stats.enabled = true;
2630 spin_unlock(&card->wr_pack_stats.lock);
2631}
2632EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
2633
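/*
 * Try to build a packed write list starting from @req: keep fetching
 * requests from the queue while they remain writes, are not FUA, are
 * correctly aligned and stay within the sector/segment limits, recording
 * the reason packing stopped in the statistics. Returns the number of
 * packed entries, or 0 to fall back to a normal transfer.
 */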
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002634static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
2635{
2636 struct request_queue *q = mq->queue;
2637 struct mmc_card *card = mq->card;
2638 struct request *cur = req, *next = NULL;
2639 struct mmc_blk_data *md = mq->data;
2640 struct mmc_queue_req *mqrq = mq->mqrq_cur;
2641 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
2642 unsigned int req_sectors = 0, phys_segments = 0;
2643 unsigned int max_blk_count, max_phys_segs;
2644 bool put_back = true;
2645 u8 max_packed_rw = 0;
2646 u8 reqs = 0;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002647 struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002648
Shawn Lin96e52da2016-08-26 08:49:55 +08002649 /*
 2650	 * If mqrq->packed is NULL there is nothing more to do for the
 2651	 * packed path: set MMC_PACKED_NONE and return zero reqs so no
 2652	 * further packed handling is attempted. Also clear the
 2653	 * MMC_BLK_PACKED_CMD flag so this is not repeated when the
 2654	 * blk request is removed.
2655 */
2656 if (!mqrq->packed) {
2657 md->flags &= (~MMC_BLK_PACKED_CMD);
2658 goto no_packed;
2659 }
2660
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002661 if (!(md->flags & MMC_BLK_PACKED_CMD))
2662 goto no_packed;
2663
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002664 if (!mq->wr_packing_enabled)
2665 goto no_packed;
2666
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002667 if ((rq_data_dir(cur) == WRITE) &&
2668 mmc_host_packed_wr(card->host))
2669 max_packed_rw = card->ext_csd.max_packed_writes;
2670
2671 if (max_packed_rw == 0)
2672 goto no_packed;
2673
2674 if (mmc_req_rel_wr(cur) &&
2675 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
2676 goto no_packed;
2677
2678 if (mmc_large_sector(card) &&
2679 !IS_ALIGNED(blk_rq_sectors(cur), 8))
2680 goto no_packed;
2681
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002682 if (cur->cmd_flags & REQ_FUA)
2683 goto no_packed;
2684
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002685 mmc_blk_clear_packed(mqrq);
2686
2687 max_blk_count = min(card->host->max_blk_count,
2688 card->host->max_req_size >> 9);
2689 if (unlikely(max_blk_count > 0xffff))
2690 max_blk_count = 0xffff;
2691
2692 max_phys_segs = queue_max_segments(q);
2693 req_sectors += blk_rq_sectors(cur);
2694 phys_segments += cur->nr_phys_segments;
2695
2696 if (rq_data_dir(cur) == WRITE) {
2697 req_sectors += mmc_large_sector(card) ? 8 : 1;
2698 phys_segments += mmc_calc_packed_hdr_segs(q, card);
2699 }
2700
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002701 spin_lock(&stats->lock);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002702 do {
2703 if (reqs >= max_packed_rw - 1) {
2704 put_back = false;
2705 break;
2706 }
2707
2708 spin_lock_irq(q->queue_lock);
2709 next = blk_fetch_request(q);
2710 spin_unlock_irq(q->queue_lock);
2711 if (!next) {
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002712 MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002713 put_back = false;
2714 break;
2715 }
2716
2717 if (mmc_large_sector(card) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002718 !IS_ALIGNED(blk_rq_sectors(next), 8)) {
2719 MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002720 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002721 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002722
Mike Christie3a5e02c2016-06-05 14:32:23 -05002723 if (req_op(next) == REQ_OP_DISCARD ||
Adrian Hunter7afafc82016-08-16 10:59:35 +03002724 req_op(next) == REQ_OP_SECURE_ERASE ||
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002725 req_op(next) == REQ_OP_FLUSH) {
2726 if (req_op(next) != REQ_OP_SECURE_ERASE)
2727 MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002728 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002729 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002730
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002731 if (next->cmd_flags & REQ_FUA) {
2732 MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
2733 break;
2734 }
2735
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002736 if (rq_data_dir(cur) != rq_data_dir(next)) {
2737 MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002738 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002739 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002740
2741 if (mmc_req_rel_wr(next) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002742 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
2743 MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002744 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002745 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002746
2747 req_sectors += blk_rq_sectors(next);
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002748 if (req_sectors > max_blk_count) {
2749 if (stats->enabled)
2750 stats->pack_stop_reason[EXCEEDS_SECTORS]++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002751 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002752 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002753
2754 phys_segments += next->nr_phys_segments;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002755 if (phys_segments > max_phys_segs) {
2756 MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002757 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002758 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002759
Maya Erez5a8dae12014-12-04 15:13:59 +02002760 if (mq->no_pack_for_random) {
2761 if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
2762 blk_rq_pos(next)) {
2763 MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
 2764				put_back = true;
2765 break;
2766 }
2767 }
2768
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002769 if (rq_data_dir(next) == WRITE)
2770 mq->num_of_potential_packed_wr_reqs++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002771 list_add_tail(&next->queuelist, &mqrq->packed->list);
2772 cur = next;
2773 reqs++;
2774 } while (1);
2775
2776 if (put_back) {
2777 spin_lock_irq(q->queue_lock);
2778 blk_requeue_request(q, next);
2779 spin_unlock_irq(q->queue_lock);
2780 }
2781
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002782 if (stats->enabled) {
2783 if (reqs + 1 <= card->ext_csd.max_packed_writes)
2784 stats->packing_events[reqs + 1]++;
2785 if (reqs + 1 == max_packed_rw)
2786 MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
2787 }
2788
2789 spin_unlock(&stats->lock);
2790
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002791 if (reqs > 0) {
2792 list_add(&req->queuelist, &mqrq->packed->list);
2793 mqrq->packed->nr_entries = ++reqs;
2794 mqrq->packed->retries = reqs;
2795 return reqs;
2796 }
2797
2798no_packed:
2799 mqrq->cmd_type = MMC_PACKED_NONE;
2800 return 0;
2801}
2802
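/*
 * Prepare a packed WRITE: fill the packed command header (a CMD23
 * argument and a CMD25 argument per entry) and set up a single
 * CMD23-bounded CMD25 transfer covering the header plus all packed
 * data blocks.
 */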
2803static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
2804 struct mmc_card *card,
2805 struct mmc_queue *mq)
2806{
2807 struct mmc_blk_request *brq = &mqrq->brq;
2808 struct request *req = mqrq->req;
2809 struct request *prq;
2810 struct mmc_blk_data *md = mq->data;
2811 struct mmc_packed *packed = mqrq->packed;
2812 bool do_rel_wr, do_data_tag;
Jiri Slaby3f2d2662016-10-03 10:58:28 +02002813 __le32 *packed_cmd_hdr;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002814 u8 hdr_blocks;
2815 u8 i = 1;
2816
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002817 mqrq->cmd_type = MMC_PACKED_WRITE;
2818 packed->blocks = 0;
2819 packed->idx_failure = MMC_PACKED_NR_IDX;
2820
2821 packed_cmd_hdr = packed->cmd_hdr;
2822 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002823 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
2824 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002825 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
2826
2827 /*
2828 * Argument for each entry of packed group
2829 */
2830 list_for_each_entry(prq, &packed->list, queuelist) {
2831 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
2832 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2833 (prq->cmd_flags & REQ_META) &&
2834 (rq_data_dir(prq) == WRITE) &&
Adrian Hunterd806b462016-06-10 16:22:16 +03002835 blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002836 /* Argument of CMD23 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002837 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002838 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
2839 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002840 blk_rq_sectors(prq));
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002841 /* Argument of CMD18 or CMD25 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002842 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002843 mmc_card_blockaddr(card) ?
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002844 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002845 packed->blocks += blk_rq_sectors(prq);
2846 i++;
2847 }
2848
2849 memset(brq, 0, sizeof(struct mmc_blk_request));
2850 brq->mrq.cmd = &brq->cmd;
2851 brq->mrq.data = &brq->data;
2852 brq->mrq.sbc = &brq->sbc;
2853 brq->mrq.stop = &brq->stop;
2854
2855 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2856 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2857 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2858
2859 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2860 brq->cmd.arg = blk_rq_pos(req);
2861 if (!mmc_card_blockaddr(card))
2862 brq->cmd.arg <<= 9;
2863 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2864
2865 brq->data.blksz = 512;
2866 brq->data.blocks = packed->blocks + hdr_blocks;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002867 brq->data.flags = MMC_DATA_WRITE;
Asutosh Dasf0665412012-07-27 18:10:19 +05302868 brq->data.fault_injected = false;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002869
2870 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2871 brq->stop.arg = 0;
2872 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2873
2874 mmc_set_data_timeout(&brq->data, card);
2875
2876 brq->data.sg = mqrq->sg;
2877 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2878
2879 mqrq->mmc_active.mrq = &brq->mrq;
Tatyana Brokhman71aefb82012-10-09 13:50:56 +02002880
2881 /*
 2882	 * This is intended for packed command test usage - when these
 2883	 * functions are not in use, the respective pointers are NULL
2884 */
2885 if (mq->err_check_fn)
2886 mqrq->mmc_active.err_check = mq->err_check_fn;
2887 else
2888 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2889
2890 if (mq->packed_test_fn)
2891 mq->packed_test_fn(mq->queue, mqrq);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002892
2893 mmc_queue_bounce_pre(mqrq);
2894}
2895
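/*
 * Complete as much of a partially failed request as we safely can: for
 * SD cards ask the card how many blocks were actually written, for other
 * cards trust the controller's bytes_xfered count.
 */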
Adrian Hunter67716322011-08-29 16:42:15 +03002896static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2897 struct mmc_blk_request *brq, struct request *req,
2898 int ret)
2899{
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002900 struct mmc_queue_req *mq_rq;
2901 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2902
Adrian Hunter67716322011-08-29 16:42:15 +03002903 /*
2904 * If this is an SD card and we're writing, we can first
2905 * mark the known good sectors as ok.
2906 *
2907 * If the card is not SD, we can still ok written sectors
2908 * as reported by the controller (which might be less than
2909 * the real number of written sectors, but never more).
2910 */
2911 if (mmc_card_sd(card)) {
2912 u32 blocks;
Asutosh Dasf0665412012-07-27 18:10:19 +05302913 if (!brq->data.fault_injected) {
2914 blocks = mmc_sd_num_wr_blocks(card);
2915 if (blocks != (u32)-1)
2916 ret = blk_end_request(req, 0, blocks << 9);
2917 } else
2918 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002919 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002920 if (!mmc_packed_cmd(mq_rq->cmd_type))
2921 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002922 }
2923 return ret;
2924}
2925
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002926static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2927{
2928 struct request *prq;
2929 struct mmc_packed *packed = mq_rq->packed;
2930 int idx = packed->idx_failure, i = 0;
2931 int ret = 0;
2932
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002933 while (!list_empty(&packed->list)) {
2934 prq = list_entry_rq(packed->list.next);
2935 if (idx == i) {
2936 /* retry from error index */
2937 packed->nr_entries -= idx;
2938 mq_rq->req = prq;
2939 ret = 1;
2940
2941 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2942 list_del_init(&prq->queuelist);
2943 mmc_blk_clear_packed(mq_rq);
2944 }
2945 return ret;
2946 }
2947 list_del_init(&prq->queuelist);
2948 blk_end_request(prq, 0, blk_rq_bytes(prq));
2949 i++;
2950 }
2951
2952 mmc_blk_clear_packed(mq_rq);
2953 return ret;
2954}
2955
2956static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2957{
2958 struct request *prq;
2959 struct mmc_packed *packed = mq_rq->packed;
2960
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002961 while (!list_empty(&packed->list)) {
2962 prq = list_entry_rq(packed->list.next);
2963 list_del_init(&prq->queuelist);
2964 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2965 }
2966
2967 mmc_blk_clear_packed(mq_rq);
2968}
2969
2970static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2971 struct mmc_queue_req *mq_rq)
2972{
2973 struct request *prq;
2974 struct request_queue *q = mq->queue;
2975 struct mmc_packed *packed = mq_rq->packed;
2976
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002977 while (!list_empty(&packed->list)) {
2978 prq = list_entry_rq(packed->list.prev);
2979 if (prq->queuelist.prev != &packed->list) {
2980 list_del_init(&prq->queuelist);
2981 spin_lock_irq(q->queue_lock);
2982 blk_requeue_request(mq->queue, prq);
2983 spin_unlock_irq(q->queue_lock);
2984 } else {
2985 list_del_init(&prq->queuelist);
2986 }
2987 }
2988
2989 mmc_blk_clear_packed(mq_rq);
2990}
2991
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07002992static int mmc_blk_cmdq_start_req(struct mmc_host *host,
2993 struct mmc_cmdq_req *cmdq_req)
2994{
2995 struct mmc_request *mrq = &cmdq_req->mrq;
2996
2997 mrq->done = mmc_blk_cmdq_req_done;
2998 return mmc_cmdq_start_req(host, cmdq_req);
2999}
3000
Asutosh Das5238e022015-04-23 16:00:45 +05303001/* prepare for non-data commands */
3002static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
3003 struct mmc_queue_req *mqrq, struct mmc_queue *mq)
3004{
3005 struct request *req = mqrq->req;
3006 struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
3007
3008 memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
3009
3010 cmdq_req->mrq.data = NULL;
3011 cmdq_req->cmd_flags = req->cmd_flags;
3012 cmdq_req->mrq.req = mqrq->req;
3013 req->special = mqrq;
3014 cmdq_req->cmdq_req_flags |= DCMD;
3015 cmdq_req->mrq.cmdq_req = cmdq_req;
3016
3017 return &mqrq->cmdq_req;
3018}
3019
3020
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003021#define IS_RT_CLASS_REQ(x) \
3022 (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
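/*
 * Illustrative example (assumption, not in the original source): the block
 * layer packs an I/O priority as IOPRIO_PRIO_VALUE(class, data), so a
 * request submitted with IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4) (e.g. via
 * ionice -c 1) makes IS_RT_CLASS_REQ() true and gets the PRIO flag set in
 * mmc_blk_cmdq_rw_prep() below.
 */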
3023
3024static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
3025 struct mmc_queue_req *mqrq, struct mmc_queue *mq)
3026{
3027 struct mmc_card *card = mq->card;
3028 struct request *req = mqrq->req;
3029 struct mmc_blk_data *md = mq->data;
3030 bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
3031 bool do_data_tag;
3032 bool read_dir = (rq_data_dir(req) == READ);
3033 bool prio = IS_RT_CLASS_REQ(req);
3034 struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
3035
3036 memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
3037
3038 cmdq_rq->tag = req->tag;
3039 if (read_dir) {
3040 cmdq_rq->cmdq_req_flags |= DIR;
3041 cmdq_rq->data.flags = MMC_DATA_READ;
3042 } else {
3043 cmdq_rq->data.flags = MMC_DATA_WRITE;
3044 }
3045 if (prio)
3046 cmdq_rq->cmdq_req_flags |= PRIO;
3047
3048 if (do_rel_wr)
3049 cmdq_rq->cmdq_req_flags |= REL_WR;
3050
3051 cmdq_rq->data.blocks = blk_rq_sectors(req);
3052 cmdq_rq->blk_addr = blk_rq_pos(req);
3053 cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
3054
3055 mmc_set_data_timeout(&cmdq_rq->data, card);
3056
3057 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
3058 (req->cmd_flags & REQ_META) &&
3059 (rq_data_dir(req) == WRITE) &&
3060 ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
3061 card->ext_csd.data_tag_unit_size);
3062 if (do_data_tag)
3063 cmdq_rq->cmdq_req_flags |= DAT_TAG;
3064 cmdq_rq->data.sg = mqrq->sg;
3065 cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
3066
3067 /*
3068 * Adjust the sg list so it is the same size as the
3069 * request.
3070 */
3071 if (cmdq_rq->data.blocks > card->host->max_blk_count)
3072 cmdq_rq->data.blocks = card->host->max_blk_count;
3073
3074 if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
3075 int i, data_size = cmdq_rq->data.blocks << 9;
3076 struct scatterlist *sg;
3077
3078 for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
3079 data_size -= sg->length;
3080 if (data_size <= 0) {
3081 sg->length += data_size;
3082 i++;
3083 break;
3084 }
3085 }
3086 cmdq_rq->data.sg_len = i;
3087 }
3088
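	/*
	 * Worked example (illustrative): if the host caps max_blk_count at
	 * 1024 but the request spans 1032 sectors, data.blocks is clamped to
	 * 1024 and data_size starts at 1024 << 9 = 524288 bytes.  The loop
	 * subtracts each segment's length; the segment that drives data_size
	 * to zero or below is shortened by the overshoot, and sg_len is
	 * truncated so the scatterlist describes exactly the clamped transfer.
	 */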
3089 mqrq->cmdq_req.cmd_flags = req->cmd_flags;
3090 mqrq->cmdq_req.mrq.req = mqrq->req;
3091 mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
3092 mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
3093 mqrq->req->special = mqrq;
3094
3095 pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
3096 mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
3097 mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
3098 cmdq_rq, cmdq_rq->blk_addr,
3099 (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
3100
3101 return &mqrq->cmdq_req;
3102}
3103
3104static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
3105{
3106 struct mmc_queue_req *active_mqrq;
3107 struct mmc_card *card = mq->card;
3108 struct mmc_host *host = card->host;
3109 struct mmc_cmdq_req *mc_rq;
3110 int ret = 0;
3111
3112 BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
3113 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
3114
3115 active_mqrq = &mq->mqrq_cmdq[req->tag];
3116 active_mqrq->req = req;
3117
3118 mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
3119
3120 ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
3121 return ret;
3122}
3123
Asutosh Das5238e022015-04-23 16:00:45 +05303124/*
3125 * Issues a flush (dcmd) request
3126 */
3127int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
3128{
3129 int err;
3130 struct mmc_queue_req *active_mqrq;
3131 struct mmc_card *card = mq->card;
3132 struct mmc_host *host;
3133 struct mmc_cmdq_req *cmdq_req;
3134 struct mmc_cmdq_context_info *ctx_info;
3135
3136 BUG_ON(!card);
3137 host = card->host;
3138 BUG_ON(!host);
3139 BUG_ON(req->tag > card->ext_csd.cmdq_depth);
3140 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
3141
3142 ctx_info = &host->cmdq_ctx;
3143
3144 set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
3145
3146 active_mqrq = &mq->mqrq_cmdq[req->tag];
3147 active_mqrq->req = req;
3148
3149 cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
3150 cmdq_req->cmdq_req_flags |= QBR;
3151 cmdq_req->mrq.cmd = &cmdq_req->cmd;
3152 cmdq_req->tag = req->tag;
3153
3154 err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
3155 if (err) {
3156 pr_err("%s: failed (%d) preparing flush req\n",
3157 mmc_hostname(host), err);
3158 return err;
3159 }
3160 err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
3161 return err;
3162}
3163EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
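/*
 * Note (assumption based on the call sequence above): mmc_cmdq_prepare_flush()
 * is expected to build the flush as a SWITCH (CMD6) that sets FLUSH_CACHE in
 * EXT_CSD.  Since the request carries no data it is issued as a DCMD, and the
 * QBR flag makes it act as a barrier against the queued transfers ahead of it.
 */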
3164
Asutosh Das02e30862015-05-20 16:52:04 +05303165static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
3166{
3167 if (!host->cmdq_ops->reset)
3168 return;
3169
3170 if (!test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state)) {
3171 if (mmc_cmdq_halt(host, true)) {
3172 pr_err("%s: halt failed\n", mmc_hostname(host));
3173 goto reset;
3174 }
3175 }
3176
3177 if (clear_all)
3178 mmc_cmdq_discard_queue(host, 0);
3179reset:
3180 mmc_hw_reset(host);
3181 host->cmdq_ops->reset(host, true);
3182 clear_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
3183}
3184
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303185static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
3186{
3187 int err;
3188 struct mmc_card *card = mq->card;
3189 struct mmc_host *host = card->host;
3190
3191 err = mmc_cmdq_halt(host, true);
3192 if (err) {
3193 pr_err("%s: halt: failed: %d\n", __func__, err);
3194 return;
3195 }
3196
3197 mmc_claim_host(card->host);
3198 /* disable CQ mode in card */
3199 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3200 EXT_CSD_CMDQ, 0,
3201 card->ext_csd.generic_cmd6_time);
3202 if (err) {
3203 pr_err("%s: failed to switch card to legacy mode: %d\n",
3204 __func__, err);
3205 goto out;
3206 } else {
3207 host->card->cmdq_init = false;
3208 }
3209out:
3210 mmc_release_host(card->host);
3211}
3212
Asutosh Dasfa8836b2015-03-02 23:14:05 +05303213static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
3214{
3215 struct mmc_queue *mq = req->q->queuedata;
3216 struct mmc_host *host = mq->card->host;
3217 struct mmc_queue_req *mq_rq = req->special;
3218 struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
3219 struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
3220
3221 host->cmdq_ops->dumpstate(host);
3222 if (cmdq_req->cmdq_req_flags & DCMD)
3223 mrq->cmd->error = -ETIMEDOUT;
3224 else
3225 mrq->data->error = -ETIMEDOUT;
3226
3227 host->err_mrq = mrq;
3228 mrq->done(mrq);
3229
3230 return BLK_EH_NOT_HANDLED;
3231}
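/*
 * Returning BLK_EH_NOT_HANDLED tells the block layer not to complete the
 * timed-out request itself: the error has been recorded in host->err_mrq and
 * mrq->done() has already been invoked, so completion and recovery continue
 * through mmc_blk_cmdq_complete_rq() and the cmdq_err_work path below.
 */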
3232
Asutosh Das02e30862015-05-20 16:52:04 +05303233static void mmc_blk_cmdq_err(struct mmc_queue *mq)
3234{
3235 int err;
3236 int retry = 0;
3237 int gen_err;
3238 u32 status;
3239
3240 struct mmc_host *host = mq->card->host;
3241 struct mmc_request *mrq = host->err_mrq;
3242 struct mmc_card *card = mq->card;
3243 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
3244
3245 err = mmc_cmdq_halt(host, true);
3246 if (err) {
3247 pr_err("halt: failed: %d\n", err);
3248 goto reset;
3249 }
3250
3251 /* RED error - Fatal: requires reset */
3252 if (mrq->cmdq_req->resp_err) {
3253 pr_crit("%s: Response error detected: Device in bad state\n",
3254 mmc_hostname(host));
3255 blk_end_request_all(mrq->req, -EIO);
3256 goto reset;
3257 }
3258
3259 if (mrq->data->error) {
3260 blk_end_request_all(mrq->req, mrq->data->error);
3261 for (; retry < MAX_RETRIES; retry++) {
3262 err = get_card_status(card, &status, 0);
3263 if (!err)
3264 break;
3265 }
3266
3267 if (err) {
3268 pr_err("%s: No response from card !!!\n",
3269 mmc_hostname(host));
3270 goto reset;
3271 }
3272
3273 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
3274 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
3275 err = send_stop(card, MMC_CMDQ_STOP_TIMEOUT_MS,
3276 mrq->req, &gen_err, &status);
3277 if (err) {
3278 pr_err("%s: error %d sending stop (%d) command\n",
3279 mrq->req->rq_disk->disk_name,
3280 err, status);
3281 goto reset;
3282 }
3283 }
3284
3285 if (mmc_cmdq_discard_queue(host, mrq->req->tag))
3286 goto reset;
3287 else
3288 goto unhalt;
3289 }
3290
3291 /* DCMD commands */
3292 if (mrq->cmd->error)
3293 blk_end_request_all(mrq->req, mrq->cmd->error);
3294
3295reset:
3296 spin_lock_irq(mq->queue->queue_lock);
3297 blk_queue_invalidate_tags(mrq->req->q);
3298 spin_unlock_irq(mq->queue->queue_lock);
3299 mmc_blk_cmdq_reset(host, true);
3300 goto out;
3301
3302unhalt:
3303 mmc_cmdq_halt(host, false);
3304
3305out:
3306 if (test_and_clear_bit(0, &ctx_info->req_starved))
3307 blk_run_queue(mrq->req->q);
3308}
3309
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003310/* invoked by block layer in softirq context */
3311void mmc_blk_cmdq_complete_rq(struct request *rq)
3312{
3313 struct mmc_queue_req *mq_rq = rq->special;
3314 struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
3315 struct mmc_host *host = mrq->host;
3316 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
3317 struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
3318 struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
3319 int err = 0;
3320
3321 if (mrq->cmd && mrq->cmd->error)
3322 err = mrq->cmd->error;
3323 else if (mrq->data && mrq->data->error)
3324 err = mrq->data->error;
3325
Asutosh Das02e30862015-05-20 16:52:04 +05303326 /* clear pending request */
3327 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
3328 &ctx_info->active_reqs));
3329
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003330 mmc_cmdq_post_req(host, mrq, err);
3331 if (err) {
3332 pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
3333 __func__, err);
Asutosh Das02e30862015-05-20 16:52:04 +05303334 if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
3335 pr_err("%s: CQ in error state, ending current req: %d\n",
3336 __func__, err);
3337 blk_end_request_all(rq, err);
3338 } else {
3339 set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
3340 schedule_work(&mq->cmdq_err_work);
3341 }
3342 goto out;
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003343 }
3344
Asutosh Das5238e022015-04-23 16:00:45 +05303345 if (cmdq_req->cmdq_req_flags & DCMD) {
3346 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
Asutosh Das02e30862015-05-20 16:52:04 +05303347 blk_end_request_all(rq, err);
Asutosh Das5238e022015-04-23 16:00:45 +05303348 goto out;
3349 }
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003350
3351 blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
3352
Asutosh Das5238e022015-04-23 16:00:45 +05303353out:
Asutosh Das02e30862015-05-20 16:52:04 +05303354 if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state) &&
3355 test_and_clear_bit(0, &ctx_info->req_starved))
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003356 blk_run_queue(mq->queue);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003357 mmc_release_host(host);
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303358
3359 if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
3360 complete(&mq->cmdq_shutdown_complete);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003361 return;
3362}
3363
3364/*
3365 * Schedules request completion via the block layer softirq.
3366 * Invoked in irq context.
3367 */
3368void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
3369{
3370 struct request *req = mrq->req;
3371
3372 blk_complete_request(req);
3373}
3374EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
3375
Per Forlinee8a43a2011-07-01 18:55:33 +02003376static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02003377{
3378 struct mmc_blk_data *md = mq->data;
3379 struct mmc_card *card = md->queue.card;
3380 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunterb8360a42015-05-07 13:10:24 +03003381 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02003382 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02003383 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303384 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02003385 struct mmc_async_req *areq;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003386 const u8 packed_nr = 2;
3387 u8 reqs = 0;
Mark Salyzyn6904e432016-01-28 11:12:25 -08003388#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3389 unsigned long waitfor = jiffies;
3390#endif
Per Forlinee8a43a2011-07-01 18:55:33 +02003391
3392 if (!rqc && !mq->mqrq_prev->req)
3393 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02003394
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003395 if (rqc)
3396 reqs = mmc_blk_prep_packed_list(mq, rqc);
3397
Per Forlin54d49d72011-07-01 18:55:29 +02003398 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02003399 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303400 /*
3401			 * When a 4KB native sector size is enabled, only reads and
3402			 * writes that are a multiple of 8 blocks (4KB) are allowed
3403 */
Yuan, Juntaoe87c8562016-05-13 07:59:24 +00003404 if (mmc_large_sector(card) &&
3405 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303406 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
3407 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003408 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303409 goto cmd_abort;
3410 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003411
3412 if (reqs >= packed_nr)
3413 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
3414 card, mq);
3415 else
3416 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02003417 areq = &mq->mqrq_cur->mmc_active;
3418 } else
3419 areq = NULL;
3420 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003421 if (!areq) {
3422 if (status == MMC_BLK_NEW_REQUEST)
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003423 set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Per Forlinee8a43a2011-07-01 18:55:33 +02003424 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003425 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02003426
Per Forlinee8a43a2011-07-01 18:55:33 +02003427 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
3428 brq = &mq_rq->brq;
3429 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03003430 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02003431 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02003432
Per Forlind78d4a82011-07-01 18:55:30 +02003433 switch (status) {
3434 case MMC_BLK_SUCCESS:
3435 case MMC_BLK_PARTIAL:
3436 /*
3437 * A block was successfully transferred.
3438 */
Adrian Hunter67716322011-08-29 16:42:15 +03003439 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003440
Mark Salyzyn6904e432016-01-28 11:12:25 -08003441 mmc_blk_simulate_delay(mq, rqc, waitfor);
3442
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003443 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3444 ret = mmc_blk_end_packed_req(mq_rq);
3445 break;
3446 } else {
3447 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02003448 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003449 }
3450
Adrian Hunter67716322011-08-29 16:42:15 +03003451 /*
3452 * If the blk_end_request function returns non-zero even
3453 * though all data has been transferred and no errors
3454 * were returned by the host controller, it's a bug.
3455 */
Per Forlinee8a43a2011-07-01 18:55:33 +02003456 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05303457 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02003458 __func__, blk_rq_bytes(req),
3459 brq->data.bytes_xfered);
3460 rqc = NULL;
3461 goto cmd_abort;
3462 }
Per Forlind78d4a82011-07-01 18:55:30 +02003463 break;
3464 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03003465 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
Ding Wang29535f72015-05-18 20:14:15 +08003466 if (mmc_blk_reset(md, card->host, type))
3467 goto cmd_abort;
3468 if (!ret)
3469 goto start_new_req;
3470 break;
Per Forlind78d4a82011-07-01 18:55:30 +02003471 case MMC_BLK_RETRY:
Adrian Hunterb8360a42015-05-07 13:10:24 +03003472 retune_retry_done = brq->retune_retry_done;
Maya Erezf93ca0a2014-12-09 23:34:41 +02003473 if (retry++ < MMC_BLK_MAX_RETRIES)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003474 break;
Adrian Hunter67716322011-08-29 16:42:15 +03003475 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02003476 case MMC_BLK_ABORT:
Maya Erezf93ca0a2014-12-09 23:34:41 +02003477 if (!mmc_blk_reset(md, card->host, type) &&
3478 (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
Adrian Hunter67716322011-08-29 16:42:15 +03003479 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003480 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003481 case MMC_BLK_DATA_ERR: {
3482 int err;
3483
3484 err = mmc_blk_reset(md, card->host, type);
3485 if (!err)
3486 break;
Sahitya Tummalad0a19842014-10-31 09:46:20 +05303487 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003488 }
3489 case MMC_BLK_ECC_ERR:
3490 if (brq->data.blocks > 1) {
3491 /* Redo read one sector at a time */
Joe Perches66061102014-09-12 14:56:56 -07003492 pr_warn("%s: retrying using single block read\n",
3493 req->rq_disk->disk_name);
Adrian Hunter67716322011-08-29 16:42:15 +03003494 disable_multi = 1;
3495 break;
3496 }
Per Forlind78d4a82011-07-01 18:55:30 +02003497 /*
3498 * After an error, we redo I/O one sector at a
3499 * time, so we only reach here after trying to
3500 * read a single sector.
3501 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303502 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02003503 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02003504 if (!ret)
3505 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02003506 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05303507 case MMC_BLK_NOMEDIUM:
3508 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003509 default:
3510 pr_err("%s: Unhandled return value (%d)",
3511 req->rq_disk->disk_name, status);
3512 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003513 }
3514
Per Forlinee8a43a2011-07-01 18:55:33 +02003515 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003516 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3517 if (!mq_rq->packed->retries)
3518 goto cmd_abort;
3519 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
3520 mmc_start_req(card->host,
3521 &mq_rq->mmc_active, NULL);
3522 } else {
3523
3524 /*
3525				 * In case of an incomplete request
3526 * prepare it again and resend.
3527 */
3528 mmc_blk_rw_rq_prep(mq_rq, card,
3529 disable_multi, mq);
3530 mmc_start_req(card->host,
3531 &mq_rq->mmc_active, NULL);
3532 }
Adrian Hunterb8360a42015-05-07 13:10:24 +03003533 mq_rq->brq.retune_retry_done = retune_retry_done;
Per Forlinee8a43a2011-07-01 18:55:33 +02003534 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 } while (ret);
3536
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 return 1;
3538
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003539 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003540 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3541 mmc_blk_abort_packed_req(mq_rq);
3542 } else {
3543 if (mmc_card_removed(card))
3544 req->cmd_flags |= REQ_QUIET;
3545 while (ret)
3546 ret = blk_end_request(req, -EIO,
3547 blk_rq_cur_bytes(req));
3548 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549
Per Forlinee8a43a2011-07-01 18:55:33 +02003550 start_new_req:
3551 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09003552 if (mmc_card_removed(card)) {
3553 rqc->cmd_flags |= REQ_QUIET;
3554 blk_end_request_all(rqc, -EIO);
3555 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003556 /*
3557			 * If the current request is packed, it needs to be put back.
3558 */
3559 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
3560 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
3561
Seungwon Jeon7a819022013-01-22 19:48:07 +09003562 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
3563 mmc_start_req(card->host,
3564 &mq->mqrq_cur->mmc_active, NULL);
3565 }
Per Forlinee8a43a2011-07-01 18:55:33 +02003566 }
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 return 0;
3569}
3570
Asutosh Das8b594832015-04-23 09:55:43 +05303571static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
3572 struct mmc_blk_data *md)
3573{
3574 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
3575 struct mmc_host *host = card->host;
3576 struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
3577 u8 part_config = card->ext_csd.part_config;
3578
3579 if ((main_md->part_curr == md->part_type) &&
3580 (card->part_curr == md->part_type))
3581 return 0;
3582
3583 WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
3584 card->ext_csd.cmdq_support &&
3585 (md->flags & MMC_BLK_CMD_QUEUE)));
3586
3587 if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
3588 WARN_ON(mmc_cmdq_halt(host, true));
3589
3590 /* disable CQ mode in card */
3591 if (mmc_card_cmdq(card)) {
3592 WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3593 EXT_CSD_CMDQ, 0,
3594 card->ext_csd.generic_cmd6_time));
3595 mmc_card_clr_cmdq(card);
3596 }
3597
3598 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
3599 part_config |= md->part_type;
3600
3601 WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3602 EXT_CSD_PART_CONFIG, part_config,
3603 card->ext_csd.part_time));
3604
3605 card->ext_csd.part_config = part_config;
3606 card->part_curr = md->part_type;
3607
3608 main_md->part_curr = md->part_type;
3609
3610 WARN_ON(mmc_blk_cmdq_switch(card, md, true));
3611 WARN_ON(mmc_cmdq_halt(host, false));
3612
3613 return 0;
3614}
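/*
 * Summary of the sequence above: halt the command queue, take the card out of
 * CMDQ mode, switch EXT_CSD_PART_CONFIG to the target partition, re-enable
 * CMDQ via mmc_blk_cmdq_switch() and finally unhalt.  Each step is expected
 * to succeed; failures are only warned about (WARN_ON), not unwound.
 */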
3615
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003616static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
3617{
3618 int ret;
3619 struct mmc_blk_data *md = mq->data;
3620 struct mmc_card *card = md->queue.card;
3621
3622 mmc_claim_host(card->host);
Asutosh Das8b594832015-04-23 09:55:43 +05303623 ret = mmc_blk_cmdq_part_switch(card, md);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003624 if (ret) {
3625 pr_err("%s: %s: partition switch failed %d\n",
3626 md->disk->disk_name, __func__, ret);
Asutosh Das5238e022015-04-23 16:00:45 +05303627 if (req)
3628 blk_end_request_all(req, ret);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003629 mmc_release_host(card->host);
3630 goto switch_failure;
3631 }
3632
Asutosh Das5238e022015-04-23 16:00:45 +05303633 if (req) {
Sahitya Tummala9433a132015-06-09 09:38:36 +05303634 if (req_op(req) == REQ_OP_DISCARD) {
3635 ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
3636 } else if (req_op(req) == REQ_OP_SECURE_ERASE) {
3637 if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3638 ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
3639 else
3640 ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
3641 } else if (req_op(req) == REQ_OP_FLUSH) {
Asutosh Das5238e022015-04-23 16:00:45 +05303642 ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
Sahitya Tummala9433a132015-06-09 09:38:36 +05303643 } else {
Asutosh Das5238e022015-04-23 16:00:45 +05303644 ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
Sahitya Tummala9433a132015-06-09 09:38:36 +05303645 }
Asutosh Das5238e022015-04-23 16:00:45 +05303646 }
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003647
3648switch_failure:
3649 return ret;
3650}
3651
Linus Walleij29eb7bd2016-09-20 11:34:38 +02003652int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
Adrian Hunterbd788c92010-08-11 14:17:47 -07003653{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003654 int ret;
3655 struct mmc_blk_data *md = mq->data;
3656 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003657 struct mmc_host *host = card->host;
3658 unsigned long flags;
Adrian Hunter869c5542016-08-25 14:11:43 -06003659 bool req_is_special = mmc_req_is_special(req);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003660
Per Forlinee8a43a2011-07-01 18:55:33 +02003661 if (req && !mq->mqrq_prev->req)
3662 /* claim host only for the first request */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003663 mmc_get_card(card);
Per Forlinee8a43a2011-07-01 18:55:33 +02003664
Andrei Warkentin371a6892011-04-11 18:10:25 -05003665 ret = mmc_blk_part_switch(card, md);
3666 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003667 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303668 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003669 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003670 ret = 0;
3671 goto out;
3672 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003673
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003674 mmc_blk_write_packing_control(mq, req);
3675
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003676 clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Mike Christiec2df40d2016-06-05 14:32:17 -05003677 if (req && req_op(req) == REQ_OP_DISCARD) {
Per Forlinee8a43a2011-07-01 18:55:33 +02003678 /* complete ongoing async transfer before issuing discard */
3679 if (card->host->areq)
3680 mmc_blk_issue_rw_rq(mq, NULL);
Christoph Hellwig288dab82016-06-09 16:00:36 +02003681 ret = mmc_blk_issue_discard_rq(mq, req);
3682 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
3683		/* complete ongoing async transfer before issuing secure erase */
3684 if (card->host->areq)
3685 mmc_blk_issue_rw_rq(mq, NULL);
Maya Erez0c0609f2014-12-09 23:31:55 +02003686 if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3687 ret = mmc_blk_issue_secdiscard_rq(mq, req);
3688 else
3689 ret = mmc_blk_issue_discard_rq(mq, req);
Mike Christie3a5e02c2016-06-05 14:32:23 -05003690 } else if (req && req_op(req) == REQ_OP_FLUSH) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09003691 /* complete ongoing async transfer before issuing flush */
3692 if (card->host->areq)
3693 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003694 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003695 } else {
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003696 if (!req && host->areq) {
3697 spin_lock_irqsave(&host->context_info.lock, flags);
3698 host->context_info.is_waiting_last_req = true;
3699 spin_unlock_irqrestore(&host->context_info.lock, flags);
3700 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003701 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003702 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003703
Andrei Warkentin371a6892011-04-11 18:10:25 -05003704out:
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003705 if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
3706 req_is_special)
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09003707 /*
3708 * Release host when there are no more requests
3709		 * and after a special request (discard, flush) is done.
3710		 * In the case of a special request, there is no reentry to
3711 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
3712 */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003713 mmc_put_card(card);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003714 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07003715}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716
Russell Kinga6f6c962006-01-03 22:38:44 +00003717static inline int mmc_blk_readonly(struct mmc_card *card)
3718{
3719 return mmc_card_readonly(card) ||
3720 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
3721}
3722
Andrei Warkentin371a6892011-04-11 18:10:25 -05003723static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3724 struct device *parent,
3725 sector_t size,
3726 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003727 const char *subname,
3728 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729{
3730 struct mmc_blk_data *md;
3731 int devidx, ret;
3732
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003733again:
3734 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
3735 return ERR_PTR(-ENOMEM);
3736
3737 spin_lock(&mmc_blk_lock);
3738 ret = ida_get_new(&mmc_blk_ida, &devidx);
3739 spin_unlock(&mmc_blk_lock);
3740
3741 if (ret == -EAGAIN)
3742 goto again;
3743 else if (ret)
3744 return ERR_PTR(ret);
3745
3746 if (devidx >= max_devices) {
3747 ret = -ENOSPC;
3748 goto out;
3749 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07003751 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00003752 if (!md) {
3753 ret = -ENOMEM;
3754 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 }
Russell Kinga6f6c962006-01-03 22:38:44 +00003756
Johan Rudholmadd710e2011-12-02 08:51:06 +01003757 md->area_type = area_type;
3758
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003759 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00003760 * Set the read-only status based on the supported commands
3761 * and the write protect switch.
3762 */
3763 md->read_only = mmc_blk_readonly(card);
3764
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003765 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00003766 if (md->disk == NULL) {
3767 ret = -ENOMEM;
3768 goto err_kfree;
3769 }
3770
3771 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003772 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00003773 md->usage = 1;
3774
Asutosh Das963469b2015-05-21 13:29:51 +05303775 ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
Russell Kinga6f6c962006-01-03 22:38:44 +00003776 if (ret)
3777 goto err_putdisk;
3778
Russell Kinga6f6c962006-01-03 22:38:44 +00003779 md->queue.data = md;
3780
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003781 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003782 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00003783 md->disk->fops = &mmc_bdops;
3784 md->disk->private_data = md;
3785 md->disk->queue = md->queue.queue;
Dan Williams307d8e62016-06-20 10:40:44 -07003786 md->parent = parent;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003787 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Cross382c55f2015-10-22 10:00:41 -07003788 md->disk->flags = GENHD_FL_EXT_DEVT;
Ulf Hanssonf5b4d712014-09-03 11:02:23 +02003789 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
Loic Pallardy53d8f972012-08-06 17:12:28 +02003790 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00003791
3792 /*
3793 * As discussed on lkml, GENHD_FL_REMOVABLE should:
3794 *
3795 * - be set for removable media with permanent block devices
3796 * - be unset for removable block devices with permanent media
3797 *
3798 * Since MMC block devices clearly fall under the second
3799 * case, we do not set GENHD_FL_REMOVABLE. Userspace
3800 * should use the block device creation/destruction hotplug
3801 * messages to tell when the card is present.
3802 */
3803
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003804 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
Ulf Hansson9aaf3432016-04-06 16:12:08 +02003805 "mmcblk%u%s", card->host->index, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00003806
Saugata Dasa5075eb2012-05-17 16:32:21 +05303807 if (mmc_card_mmc(card))
3808 blk_queue_logical_block_size(md->queue.queue,
3809 card->ext_csd.data_sector_size);
3810 else
3811 blk_queue_logical_block_size(md->queue.queue, 512);
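	/*
	 * For 4KB-native eMMC parts ext_csd.data_sector_size is 4096, so the
	 * queue advertises 4KB logical blocks; this pairs with the 8-block
	 * (4KB) alignment check enforced in mmc_blk_issue_rw_rq() above.
	 */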
3812
Andrei Warkentin371a6892011-04-11 18:10:25 -05003813 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003814
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003815 if (mmc_host_cmd23(card->host)) {
Daniel Glöckner0ed50ab2016-08-30 14:17:30 +02003816 if ((mmc_card_mmc(card) &&
3817 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003818 (mmc_card_sd(card) &&
3819 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
3820 md->flags |= MMC_BLK_CMD23;
3821 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003822
3823 if (mmc_card_mmc(card) &&
3824 md->flags & MMC_BLK_CMD23 &&
3825 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
Asutosh Das5238e022015-04-23 16:00:45 +05303826 card->ext_csd.rel_sectors)) {
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003827 md->flags |= MMC_BLK_REL_WR;
Jens Axboee9d5c742016-03-30 10:17:20 -06003828 blk_queue_write_cache(md->queue.queue, true, true);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003829 }
3830
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003831 if (card->cmdq_init) {
3832 md->flags |= MMC_BLK_CMD_QUEUE;
3833 md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
3834 md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
Asutosh Das02e30862015-05-20 16:52:04 +05303835 md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
Asutosh Dasfa8836b2015-03-02 23:14:05 +05303836 md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303837 md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003838 }
3839
3840 if (mmc_card_mmc(card) && !card->cmdq_init &&
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003841 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
3842 (md->flags & MMC_BLK_CMD23) &&
3843 card->ext_csd.packed_event_en) {
3844 if (!mmc_packed_init(&md->queue, card))
3845 md->flags |= MMC_BLK_PACKED_CMD;
3846 }
3847
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00003849
3850 err_putdisk:
3851 put_disk(md->disk);
3852 err_kfree:
3853 kfree(md);
3854 out:
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003855 spin_lock(&mmc_blk_lock);
3856 ida_remove(&mmc_blk_ida, devidx);
3857 spin_unlock(&mmc_blk_lock);
Russell Kinga6f6c962006-01-03 22:38:44 +00003858 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859}
3860
Andrei Warkentin371a6892011-04-11 18:10:25 -05003861static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
3862{
3863 sector_t size;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003864
3865 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
3866 /*
3867		 * The EXT_CSD sector count is in units of 512 byte
3868 * sectors.
3869 */
3870 size = card->ext_csd.sectors;
3871 } else {
3872 /*
3873 * The CSD capacity field is in units of read_blkbits.
3874 * set_capacity takes units of 512 bytes.
3875 */
Kuninori Morimoto087de9e2015-05-11 07:35:28 +00003876 size = (typeof(sector_t))card->csd.capacity
3877 << (card->csd.read_blkbits - 9);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003878 }
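	/*
	 * Worked example (illustrative): a CSD reporting read_blkbits = 10
	 * (1KB blocks) and capacity = 4096 blocks gives
	 * 4096 << (10 - 9) = 8192 sectors of 512 bytes, i.e. 4MB, which is
	 * the unit set_capacity() expects.
	 */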
3879
Tobias Klauser7a30f2a2015-01-21 15:56:44 +01003880 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003881 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003882}
3883
3884static int mmc_blk_alloc_part(struct mmc_card *card,
3885 struct mmc_blk_data *md,
3886 unsigned int part_type,
3887 sector_t size,
3888 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003889 const char *subname,
3890 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05003891{
3892 char cap_str[10];
3893 struct mmc_blk_data *part_md;
3894
3895 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003896 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003897 if (IS_ERR(part_md))
3898 return PTR_ERR(part_md);
3899 part_md->part_type = part_type;
3900 list_add(&part_md->part, &md->part);
3901
James Bottomleyb9f28d82015-03-05 18:47:01 -08003902 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
Andrei Warkentin371a6892011-04-11 18:10:25 -05003903 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303904 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05003905 part_md->disk->disk_name, mmc_card_id(card),
3906 mmc_card_name(card), part_md->part_type, cap_str);
3907 return 0;
3908}
3909
Namjae Jeone0c368d2011-10-06 23:41:38 +09003910/* MMC Physical partitions consist of two boot partitions and
3911 * up to four general purpose partitions.
3912 * For each partition enabled in EXT_CSD a block device will be allocated
3913 * to provide access to the partition.
3914 */
3915
Andrei Warkentin371a6892011-04-11 18:10:25 -05003916static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3917{
Namjae Jeone0c368d2011-10-06 23:41:38 +09003918 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003919
3920 if (!mmc_card_mmc(card))
3921 return 0;
3922
Namjae Jeone0c368d2011-10-06 23:41:38 +09003923 for (idx = 0; idx < card->nr_parts; idx++) {
3924 if (card->part[idx].size) {
3925 ret = mmc_blk_alloc_part(card, md,
3926 card->part[idx].part_cfg,
3927 card->part[idx].size >> 9,
3928 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003929 card->part[idx].name,
3930 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09003931 if (ret)
3932 return ret;
3933 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003934 }
3935
3936 return ret;
3937}
3938
Andrei Warkentin371a6892011-04-11 18:10:25 -05003939static void mmc_blk_remove_req(struct mmc_blk_data *md)
3940{
Johan Rudholmadd710e2011-12-02 08:51:06 +01003941 struct mmc_card *card;
3942
Andrei Warkentin371a6892011-04-11 18:10:25 -05003943 if (md) {
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003944 /*
3945 * Flush remaining requests and free queues. It
3946 * is freeing the queue that stops new requests
3947 * from being accepted.
3948 */
Franck Jullien8efb83a2013-07-24 15:17:48 +02003949 card = md->queue.card;
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003950 mmc_cleanup_queue(&md->queue);
3951 if (md->flags & MMC_BLK_PACKED_CMD)
3952 mmc_packed_clean(&md->queue);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003953 if (md->flags & MMC_BLK_CMD_QUEUE)
3954 mmc_cmdq_clean(&md->queue, card);
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003955 device_remove_file(disk_to_dev(md->disk),
3956 &md->num_wr_reqs_to_start_packing);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003957 if (md->disk->flags & GENHD_FL_UP) {
3958 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003959 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
3960 card->ext_csd.boot_ro_lockable)
3961 device_remove_file(disk_to_dev(md->disk),
3962 &md->power_ro_lock);
Mark Salyzyn6904e432016-01-28 11:12:25 -08003963#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3964 device_remove_file(disk_to_dev(md->disk),
3965 &dev_attr_max_write_speed);
3966 device_remove_file(disk_to_dev(md->disk),
3967 &dev_attr_max_read_speed);
3968 device_remove_file(disk_to_dev(md->disk),
3969 &dev_attr_cache_size);
3970#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05003971
Andrei Warkentin371a6892011-04-11 18:10:25 -05003972 del_gendisk(md->disk);
3973 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003974 mmc_blk_put(md);
3975 }
3976}
3977
3978static void mmc_blk_remove_parts(struct mmc_card *card,
3979 struct mmc_blk_data *md)
3980{
3981 struct list_head *pos, *q;
3982 struct mmc_blk_data *part_md;
3983
3984 list_for_each_safe(pos, q, &md->part) {
3985 part_md = list_entry(pos, struct mmc_blk_data, part);
3986 list_del(pos);
3987 mmc_blk_remove_req(part_md);
3988 }
3989}
3990
3991static int mmc_add_disk(struct mmc_blk_data *md)
3992{
3993 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01003994 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003995
Dan Williams307d8e62016-06-20 10:40:44 -07003996 device_add_disk(md->parent, md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003997 md->force_ro.show = force_ro_show;
3998 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05303999 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004000 md->force_ro.attr.name = "force_ro";
4001 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
4002 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
4003 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01004004 goto force_ro_fail;
Mark Salyzyn6904e432016-01-28 11:12:25 -08004005#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
4006 atomic_set(&md->queue.max_write_speed, max_write_speed);
4007 ret = device_create_file(disk_to_dev(md->disk),
4008 &dev_attr_max_write_speed);
4009 if (ret)
4010 goto max_write_speed_fail;
4011 atomic_set(&md->queue.max_read_speed, max_read_speed);
4012 ret = device_create_file(disk_to_dev(md->disk),
4013 &dev_attr_max_read_speed);
4014 if (ret)
4015 goto max_read_speed_fail;
4016 atomic_set(&md->queue.cache_size, cache_size);
4017 atomic_long_set(&md->queue.cache_used, 0);
4018 md->queue.cache_jiffies = jiffies;
4019 ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
4020 if (ret)
4021 goto cache_size_fail;
4022#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01004023
4024 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
4025 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04004026 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01004027
4028 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
4029 mode = S_IRUGO;
4030 else
4031 mode = S_IRUGO | S_IWUSR;
4032
4033 md->power_ro_lock.show = power_ro_lock_show;
4034 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01004035 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01004036 md->power_ro_lock.attr.mode = mode;
4037 md->power_ro_lock.attr.name =
4038 "ro_lock_until_next_power_on";
4039 ret = device_create_file(disk_to_dev(md->disk),
4040 &md->power_ro_lock);
4041 if (ret)
4042 goto power_ro_lock_fail;
4043 }
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02004044
4045 md->num_wr_reqs_to_start_packing.show =
4046 num_wr_reqs_to_start_packing_show;
4047 md->num_wr_reqs_to_start_packing.store =
4048 num_wr_reqs_to_start_packing_store;
4049 sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
4050 md->num_wr_reqs_to_start_packing.attr.name =
4051 "num_wr_reqs_to_start_packing";
4052 md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
4053 ret = device_create_file(disk_to_dev(md->disk),
4054 &md->num_wr_reqs_to_start_packing);
4055 if (ret)
Maya Erez17022402014-12-04 00:15:42 +02004056 goto num_wr_reqs_to_start_packing_fail;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02004057
Maya Erez5a8dae12014-12-04 15:13:59 +02004058 md->no_pack_for_random.show = no_pack_for_random_show;
4059 md->no_pack_for_random.store = no_pack_for_random_store;
4060 sysfs_attr_init(&md->no_pack_for_random.attr);
4061 md->no_pack_for_random.attr.name = "no_pack_for_random";
4062 md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
4063 ret = device_create_file(disk_to_dev(md->disk),
4064 &md->no_pack_for_random);
4065 if (ret)
4066 goto no_pack_for_random_fails;
4067
Johan Rudholmadd710e2011-12-02 08:51:06 +01004068 return ret;
4069
Maya Erez5a8dae12014-12-04 15:13:59 +02004070no_pack_for_random_fails:
4071 device_remove_file(disk_to_dev(md->disk),
4072 &md->num_wr_reqs_to_start_packing);
Maya Erez17022402014-12-04 00:15:42 +02004073num_wr_reqs_to_start_packing_fail:
4074 device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
Johan Rudholmadd710e2011-12-02 08:51:06 +01004075power_ro_lock_fail:
Mark Salyzyn6904e432016-01-28 11:12:25 -08004076#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
4077 device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
4078cache_size_fail:
4079 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
4080max_read_speed_fail:
4081 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
4082max_write_speed_fail:
4083#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01004084 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
4085force_ro_fail:
4086 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004087
4088 return ret;
4089}
4090
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004091static const struct mmc_fixup blk_fixups[] =
4092{
Chris Ballc59d4472011-11-11 22:01:43 -05004093 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
4094 MMC_QUIRK_INAND_CMD38),
4095 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
4096 MMC_QUIRK_INAND_CMD38),
4097 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
4098 MMC_QUIRK_INAND_CMD38),
4099 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
4100 MMC_QUIRK_INAND_CMD38),
4101 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
4102 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004103
4104 /*
4105 * Some MMC cards experience performance degradation with CMD23
4106 * instead of CMD12-bounded multiblock transfers. For now we'll
4107 * black list what's bad...
4108 * - Certain Toshiba cards.
4109 *
4110 * N.B. This doesn't affect SD cards.
4111 */
Yangbo Lu7d70d472015-07-10 11:44:03 +08004112 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
4113 MMC_QUIRK_BLK_NO_CMD23),
4114 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
4115 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004116 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004117 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004118 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004119 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004120 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004121 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004122
4123 /*
Matt Gumbel32ecd322016-05-20 10:33:46 +03004124	 * Some MMC cards need a longer data read timeout than indicated in the CSD.
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004125 */
Chris Ballc59d4472011-11-11 22:01:43 -05004126 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004127 MMC_QUIRK_LONG_READ_TIME),
Matt Gumbel32ecd322016-05-20 10:33:46 +03004128 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
4129 MMC_QUIRK_LONG_READ_TIME),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004130
Ian Chen3550ccd2012-08-29 15:05:36 +09004131 /*
Guoping Yu3c984a92014-08-06 12:44:55 +08004132	 * Some Samsung MMC cards need a longer data read timeout than
4133	 * indicated in the CSD.
4134 */
4135 MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
4136 MMC_QUIRK_LONG_READ_TIME),
4137
4138 /*
Ian Chen3550ccd2012-08-29 15:05:36 +09004139 * On these Samsung MoviNAND parts, performing secure erase or
4140 * secure trim can result in unrecoverable corruption due to a
4141 * firmware bug.
4142 */
4143 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4144 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4145 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4146 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4147 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4148 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4149 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4150 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4151 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4152 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4153 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4154 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4155 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4156 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4157 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4158 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4159
Shawn Linb5b4ff02015-08-12 13:08:32 +08004160 /*
4161	 * On some Kingston eMMCs, performing trim can occasionally result in
4162	 * unrecoverable data corruption due to a firmware bug.
4163 */
4164 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
4165 MMC_QUIRK_TRIM_BROKEN),
4166 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
4167 MMC_QUIRK_TRIM_BROKEN),
4168
Pratibhasagar V8d664e32014-12-03 18:26:42 +02004169 /* Some INAND MCP devices advertise incorrect timeout values */
4170 MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
4171 MMC_QUIRK_INAND_DATA_TIMEOUT),
4172
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004173 END_FIXUP
4174};
4175
Ulf Hansson96541ba2015-04-14 13:06:12 +02004176static int mmc_blk_probe(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004178 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004179 char cap_str[10];
4180
Pierre Ossman912490d2005-05-21 10:27:02 +01004181 /*
4182 * Check that the card supports the command class(es) we need.
4183 */
4184 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 return -ENODEV;
4186
Lukas Czerner5204d002014-06-18 13:18:07 +02004187 mmc_fixup_device(card, blk_fixups);
4188
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 md = mmc_blk_alloc(card);
4190 if (IS_ERR(md))
4191 return PTR_ERR(md);
4192
James Bottomleyb9f28d82015-03-05 18:47:01 -08004193 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004194 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05304195 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004197 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198
Andrei Warkentin371a6892011-04-11 18:10:25 -05004199 if (mmc_blk_alloc_parts(card, md))
4200 goto out;
4201
Ulf Hansson96541ba2015-04-14 13:06:12 +02004202 dev_set_drvdata(&card->dev, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004203
Andrei Warkentin371a6892011-04-11 18:10:25 -05004204 if (mmc_add_disk(md))
4205 goto out;
4206
4207 list_for_each_entry(part_md, &md->part, part) {
4208 if (mmc_add_disk(part_md))
4209 goto out;
4210 }
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004211
4212 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
4213 pm_runtime_use_autosuspend(&card->dev);
4214
4215 /*
4216 * Don't enable runtime PM for SD-combo cards here. Leave that
4217 * decision to be taken during the SDIO init sequence instead.
4218 */
4219 if (card->type != MMC_TYPE_SD_COMBO) {
4220 pm_runtime_set_active(&card->dev);
4221 pm_runtime_enable(&card->dev);
4222 }
4223
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 return 0;
4225
4226 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05004227 mmc_blk_remove_parts(card, md);
4228 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01004229 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230}
4231
Ulf Hansson96541ba2015-04-14 13:06:12 +02004232static void mmc_blk_remove(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004234 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235
Andrei Warkentin371a6892011-04-11 18:10:25 -05004236 mmc_blk_remove_parts(card, md);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004237 pm_runtime_get_sync(&card->dev);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03004238 mmc_claim_host(card->host);
4239 mmc_blk_part_switch(card, md);
4240 mmc_release_host(card->host);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004241 if (card->type != MMC_TYPE_SD_COMBO)
4242 pm_runtime_disable(&card->dev);
4243 pm_runtime_put_noidle(&card->dev);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004244 mmc_blk_remove_req(md);
Ulf Hansson96541ba2015-04-14 13:06:12 +02004245 dev_set_drvdata(&card->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246}
4247
Ulf Hansson96541ba2015-04-14 13:06:12 +02004248static int _mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004250 struct mmc_blk_data *part_md;
Ulf Hansson96541ba2015-04-14 13:06:12 +02004251 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304252 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
4254 if (md) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05304255 rc = mmc_queue_suspend(&md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304256 if (rc)
4257 goto out;
Andrei Warkentin371a6892011-04-11 18:10:25 -05004258 list_for_each_entry(part_md, &md->part, part) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05304259 rc = mmc_queue_suspend(&part_md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304260 if (rc)
4261 goto out_resume;
Andrei Warkentin371a6892011-04-11 18:10:25 -05004262 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 }
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304264 goto out;
4265
4266 out_resume:
4267 mmc_queue_resume(&md->queue);
4268 list_for_each_entry(part_md, &md->part, part) {
4269 mmc_queue_resume(&part_md->queue);
4270 }
4271 out:
4272 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273}
4274
Ulf Hansson96541ba2015-04-14 13:06:12 +02004275static void mmc_blk_shutdown(struct mmc_card *card)
Ulf Hansson76287742013-06-10 17:03:40 +02004276{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004277 _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02004278}
4279
Ulf Hansson0967edc2014-10-06 11:29:42 +02004280#ifdef CONFIG_PM_SLEEP
4281static int mmc_blk_suspend(struct device *dev)
Ulf Hansson76287742013-06-10 17:03:40 +02004282{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004283 struct mmc_card *card = mmc_dev_to_card(dev);
4284
4285 return _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02004286}
4287
Ulf Hansson0967edc2014-10-06 11:29:42 +02004288static int mmc_blk_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004290 struct mmc_blk_data *part_md;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02004291 struct mmc_blk_data *md = dev_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292
4293 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05004294 /*
4295 * Resume involves the card going into idle state,
4296 * so current partition is always the main one.
4297 */
4298 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004300 list_for_each_entry(part_md, &md->part, part) {
4301 mmc_queue_resume(&part_md->queue);
4302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 }
4304 return 0;
4305}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306#endif
4307
Ulf Hansson0967edc2014-10-06 11:29:42 +02004308static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
4309
Ulf Hansson96541ba2015-04-14 13:06:12 +02004310static struct mmc_driver mmc_driver = {
4311 .drv = {
4312 .name = "mmcblk",
4313 .pm = &mmc_blk_pm_ops,
4314 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 .probe = mmc_blk_probe,
4316 .remove = mmc_blk_remove,
Ulf Hansson76287742013-06-10 17:03:40 +02004317 .shutdown = mmc_blk_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318};
4319
4320static int __init mmc_blk_init(void)
4321{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004322 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323
Olof Johansson5e71b7a2010-09-17 21:19:57 -04004324 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
4325 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
4326
Ben Hutchingsa26eba62014-11-06 03:35:09 +00004327 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
Olof Johansson5e71b7a2010-09-17 21:19:57 -04004328
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02004329 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
4330 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004333 res = mmc_register_driver(&mmc_driver);
4334 if (res)
4335 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004337 return 0;
4338 out2:
4339 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 out:
4341 return res;
4342}
4343
4344static void __exit mmc_blk_exit(void)
4345{
4346 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02004347 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348}
4349
4350module_init(mmc_blk_init);
4351module_exit(mmc_blk_exit);
4352
4353MODULE_LICENSE("GPL");
4354MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
4355