/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/ioprio.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
#include "block.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD	 113
#define INAND_CMD38_ARG_ERASE	 0x00
#define INAND_CMD38_ARG_TRIM	 0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (30 * 1000)	/* 30 sec timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_CMDQ_STOP_TIMEOUT_MS 100

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
#define PACKED_TRIGGER_MAX_ELEMENTS	5000

#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)		\
	do {							\
		if (stats->enabled)				\
			stats->pack_stop_reason[reason]++;	\
	} while (0)

#define MAX_RETRIES 5
#define PCKD_TRGR_INIT_MEAN_POTEN	17
#define PCKD_TRGR_POTEN_LOWER_BOUND	5
#define PCKD_TRGR_URGENT_PENALTY	2
#define PCKD_TRGR_LOWER_BOUND		5
#define PCKD_TRGR_PRECISION_MULTIPLIER	100

static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
		struct mmc_queue_req *mqrq, struct mmc_queue *mq);
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256
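/*
 * Worked example (illustrative, not from the original source): with the
 * common default of perdev_minors = 8, (1 << 20) / 8 = 131072 possible
 * devices, so in practice the MAX_DEVICES value of 256 above is the limit
 * that actually applies.
 */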

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
#define MMC_BLK_CMD_QUEUE	(1 << 3)	/* MMC command queue support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_FLUSH		BIT(4)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	struct device_attribute num_wr_reqs_to_start_packing;
	struct device_attribute no_pack_for_random;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
static int mmc_blk_cmdq_switch(struct mmc_card *card,
			       struct mmc_blk_data *md, bool enable);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

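/*
 * Reference counting for the per-disk mmc_blk_data: mmc_blk_get() takes a
 * reference under open_lock (returning NULL once the usage count has already
 * dropped to zero), and mmc_blk_put() drops it, freeing the data and
 * releasing its device index on the final put.
 */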
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int locked = 0;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	if (!md)
		return -EINVAL;
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
			 card->ext_csd.boot_ro_lock |
			 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
			 card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	if (!md)
		return -EINVAL;

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (!md)
		return -EINVAL;

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static ssize_t
no_pack_for_random_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret;

	if (!md)
		return -EINVAL;
	ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);

	mmc_blk_put(md);
	return ret;
}

static ssize_t
no_pack_for_random_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int ret = count;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (!card) {
		ret = -EINVAL;
		goto exit;
	}

	sscanf(buf, "%d", &value);

	if (value < 0) {
		pr_err("%s: value %d is not valid. old value remains = %d",
			mmc_hostname(card->host), value,
			md->queue.no_pack_for_random);
		ret = -EINVAL;
		goto exit;
	}

	md->queue.no_pack_for_random = (value > 0) ? true : false;

	pr_debug("%s: no_pack_for_random: new value = %d",
		mmc_hostname(card->host),
		md->queue.no_pack_for_random);

exit:
	mmc_blk_put(md);
	return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int num_wr_reqs_to_start_packing;
	int ret;

	if (!md)
		return -EINVAL;
	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);

	mmc_blk_put(md);
	return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card;
	int ret = count;

	if (!md)
		return -EINVAL;

	card = md->queue.card;
	if (!card) {
		ret = -EINVAL;
		goto exit;
	}

	sscanf(buf, "%d", &value);

	if (value >= 0) {
		md->queue.num_wr_reqs_to_start_packing =
		    min_t(int, value, (int)card->ext_csd.max_packed_writes);

		pr_debug("%s: trigger to pack: new value = %d",
			mmc_hostname(card->host),
			md->queue.num_wr_reqs_to_start_packing);
	} else {
		pr_err("%s: value %d is not valid. old value remains = %d",
			mmc_hostname(card->host), value,
			md->queue.num_wr_reqs_to_start_packing);
		ret = -EINVAL;
	}

exit:
	mmc_blk_put(md);
	return ret;
}

#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED

static int max_read_speed, max_write_speed, cache_size = 4;

module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
module_param(cache_size, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");

/*
 * helper macros and expectations:
 *	size	- unsigned long number of bytes
 *	jiffies	- unsigned long HZ timestamp difference
 *	speed	- unsigned KB/s transfer rate
 */
#define size_and_speed_to_jiffies(size, speed) \
	((size) * HZ / (speed) / 1024UL)
#define jiffies_and_speed_to_size(jiffies, speed) \
	(((speed) * (jiffies) * 1024UL) / HZ)
#define jiffies_and_size_to_speed(jiffies, size) \
	((size) * HZ / (jiffies) / 1024UL)
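/*
 * Worked example (illustrative, assuming HZ = 100): transferring 1 MiB with a
 * cap of 1024 KB/s gives size_and_speed_to_jiffies(1048576, 1024)
 * = 1048576 * 100 / 1024 / 1024 = 100 jiffies, i.e. one second of delay.
 */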

/* Limits to report warning */
/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)

#define speed_valid(speed) ((speed) > 0)

static const char off[] = "off\n";

static int max_speed_show(int speed, char *buf)
{
	if (speed)
		return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
	else
		return scnprintf(buf, PAGE_SIZE, off);
}

static int max_speed_store(const char *buf, struct request_queue *q)
{
	unsigned int limit, set = 0;

	if (!strncasecmp(off, buf, sizeof(off) - 2))
		return set;
	if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
		return -EINVAL;
	if (set == 0)
		return set;
	limit = MAX_SPEED(q);
	if (set > limit)
		pr_warn("max speed %u ineffective above %u\n", set, limit);
	limit = MIN_SPEED(q);
	if (set < limit)
		pr_warn("max speed %u painful below %u\n", set, limit);
	return set;
}

static ssize_t max_write_speed_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);

	mmc_blk_put(md);
	return ret;
}

static ssize_t max_write_speed_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int set = max_speed_store(buf, md->queue.queue);

	if (set < 0) {
		mmc_blk_put(md);
		return set;
	}

	atomic_set(&md->queue.max_write_speed, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
			 max_write_speed_show, max_write_speed_store);

static ssize_t max_read_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);

	mmc_blk_put(md);
	return ret;
}

static ssize_t max_read_speed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	int set = max_speed_store(buf, md->queue.queue);

	if (set < 0) {
		mmc_blk_put(md);
		return set;
	}

	atomic_set(&md->queue.max_read_speed, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
			 max_read_speed_show, max_read_speed_store);

static ssize_t cache_size_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_queue *mq = &md->queue;
	int cache_size = atomic_read(&mq->cache_size);
	int ret;

	if (!cache_size)
		ret = scnprintf(buf, PAGE_SIZE, off);
	else {
		int speed = atomic_read(&mq->max_write_speed);

		if (!speed_valid(speed))
			ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
		else { /* We accept race between cache_jiffies and cache_used */
			unsigned long size = jiffies_and_speed_to_size(
				jiffies - mq->cache_jiffies, speed);
			long used = atomic_long_read(&mq->cache_used);

			if (size >= used)
				size = 0;
			else
				size = (used - size) * 100 / cache_size
					/ 1024UL / 1024UL;

			ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
				cache_size, size);
		}
	}

	mmc_blk_put(md);
	return ret;
}

static ssize_t cache_size_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct mmc_blk_data *md;
	unsigned int set = 0;

	if (strncasecmp(off, buf, sizeof(off) - 2)
	    && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
		return -EINVAL;

	md = mmc_blk_get(dev_to_disk(dev));
	atomic_set(&md->queue.cache_size, set);
	mmc_blk_put(md);
	return count;
}

static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
			 cache_size_show, cache_size_store);

/* correct for write-back */
static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
{
	long used = 0;
	int speed = atomic_read(&mq->max_write_speed);

	if (speed_valid(speed)) {
		unsigned long size = jiffies_and_speed_to_size(
			waitfor - mq->cache_jiffies, speed);
		used = atomic_long_read(&mq->cache_used);

		if (size >= used)
			used = 0;
		else
			used -= size;
	}

	atomic_long_set(&mq->cache_used, used);
	mq->cache_jiffies = waitfor;

	return used;
}

static void mmc_blk_simulate_delay(
	struct mmc_queue *mq,
	struct request *req,
	unsigned long waitfor)
{
	int max_speed;

	if (!req)
		return;

	max_speed = (rq_data_dir(req) == READ)
		? atomic_read(&mq->max_read_speed)
		: atomic_read(&mq->max_write_speed);
	if (speed_valid(max_speed)) {
		unsigned long bytes = blk_rq_bytes(req);

		if (rq_data_dir(req) != READ) {
			int cache_size = atomic_read(&mq->cache_size);

			if (cache_size) {
				unsigned long size = cache_size * 1024L * 1024L;
				long used = mmc_blk_cache_used(mq, waitfor);

				used += bytes;
				atomic_long_set(&mq->cache_used, used);
				bytes = 0;
				if (used > size)
					bytes = used - size;
			}
		}
		waitfor += size_and_speed_to_jiffies(bytes, max_speed);
		if (time_is_after_jiffies(waitfor)) {
			long msecs = jiffies_to_msecs(waitfor - jiffies);

			if (likely(msecs > 0))
				msleep(msecs);
		}
	}
}

#else

#define mmc_blk_simulate_delay(mq, req, waitfor)

#endif

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

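/*
 * Report a synthetic CHS geometry (16 sectors, 4 heads) for tools that still
 * ask for one; the cylinder count is simply capacity / (4 * 16).
 */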
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

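/*
 * Duplicate an mmc_ioc_cmd (and, when present, its data buffer) from user
 * space into a freshly allocated mmc_blk_ioc_data; returns an ERR_PTR() on
 * allocation or copy failure.
 */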
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

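/*
 * Poll CMD13 (SEND_STATUS) until the card reports that it has left the
 * programming state, giving up after retries_max attempts.
 */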
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

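/*
 * Issue a single user-supplied command described by idata.  The caller is
 * expected to have claimed the host (mmc_get_card()) and to copy any
 * response or read data back to user space afterwards.
 */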
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;

	if (!card || !md || !idata)
		return -EINVAL;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	return err;
}

struct mmc_blk_ioc_rpmb_data {
	struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
};

static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
	struct mmc_ioc_rpmb __user *user)
{
	struct mmc_blk_ioc_rpmb_data *idata;
	int err, i;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
		if (IS_ERR(idata->data[i])) {
			err = PTR_ERR(idata->data[i]);
			goto copy_err;
		}
	}

	return idata;

copy_err:
	while (--i >= 0) {
		kfree(idata->data[i]->buf);
		kfree(idata->data[i]);
	}
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
	struct mmc_ioc_rpmb __user *ic_ptr)
{
	struct mmc_blk_ioc_rpmb_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err = 0, i = 0;
	u32 status = 0;

	/* The caller must have CAP_SYS_RAWIO */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	md = mmc_blk_get(bdev->bd_disk);
	/* make sure this is a rpmb partition */
	if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) {
		err = -EINVAL;
		return err;
	}

	idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
	if (IS_ERR(idata)) {
		err = PTR_ERR(idata);
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto idata_free;
	}

	mmc_get_card(card);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		struct mmc_blk_ioc_data *curr_data;
		struct mmc_ioc_cmd *curr_cmd;

		curr_data = idata->data[i];
		curr_cmd = &curr_data->ic;
		if (!curr_cmd->opcode)
			break;

		cmd.opcode = curr_cmd->opcode;
		cmd.arg = curr_cmd->arg;
		cmd.flags = curr_cmd->flags;

		if (curr_data->buf_bytes) {
			data.sg = &sg;
			data.sg_len = 1;
			data.blksz = curr_cmd->blksz;
			data.blocks = curr_cmd->blocks;

			sg_init_one(data.sg, curr_data->buf,
				    curr_data->buf_bytes);

			if (curr_cmd->write_flag)
				data.flags = MMC_DATA_WRITE;
			else
				data.flags = MMC_DATA_READ;

			/* data.flags must already be set before doing this. */
			mmc_set_data_timeout(&data, card);

			/*
			 * Allow overriding the timeout_ns for empirical tuning.
			 */
			if (curr_cmd->data_timeout_ns)
				data.timeout_ns = curr_cmd->data_timeout_ns;

			mrq.data = &data;
		}

		mrq.cmd = &cmd;

		err = mmc_set_blockcount(card, data.blocks,
			curr_cmd->write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;

		mmc_wait_for_req(card->host, &mrq);

		if (cmd.error) {
			dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
				__func__, cmd.error);
			err = cmd.error;
			goto cmd_rel_host;
		}
		if (data.error) {
			dev_err(mmc_dev(card->host), "%s: data error %d\n",
				__func__, data.error);
			err = data.error;
			goto cmd_rel_host;
		}

		if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
				 sizeof(cmd.resp))) {
			err = -EFAULT;
			goto cmd_rel_host;
		}

		if (!curr_cmd->write_flag) {
			if (copy_to_user((void __user *)(unsigned long)
					 curr_cmd->data_ptr,
					 curr_data->buf,
					 curr_data->buf_bytes)) {
				err = -EFAULT;
				goto cmd_rel_host;
			}
		}

		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

cmd_rel_host:
	mmc_put_card(card);

idata_free:
	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
		kfree(idata->data[i]->buf);
		kfree(idata->data[i]);
	}
	kfree(idata);

cmd_done:
	mmc_blk_put(md);
	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR_OR_NULL(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR_OR_NULL(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_RPMB_CMD:
		return mmc_blk_ioctl_rpmb_cmd(bdev,
				(struct mmc_ioc_rpmb __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
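
/*
 * Illustrative user-space sketch of the MMC_IOC_CMD path handled above (not
 * part of this driver; the constants are taken from <linux/mmc/ioctl.h> and
 * the kernel's mmc headers, and reading the EXT_CSD with opcode 8 is just one
 * example of a command a tool might send):
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	__u8 ext_csd[512];
 *
 *	ic.opcode = 8;                  opcode 8 is MMC_SEND_EXT_CSD
 *	ic.arg = 0;
 *	ic.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *	ioctl(fd, MMC_IOC_CMD, &ic);    fd opened on the whole /dev/mmcblkN
 */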

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

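/*
 * Enable or disable the eMMC command queue via EXT_CSD_CMDQ.  Disabling first
 * halts the host's command-queue engine; enabling first restores the fixed
 * CMDQ block size.
 */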
static int mmc_blk_cmdq_switch(struct mmc_card *card,
			       struct mmc_blk_data *md, bool enable)
{
	int ret = 0;
	bool cmdq_mode = !!mmc_card_cmdq(card);
	struct mmc_host *host = card->host;
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
	    !card->ext_csd.cmdq_support ||
	    (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
	    (cmdq_mode == enable))
		return 0;

	if (enable) {
		ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
		if (ret) {
			pr_err("%s: failed (%d) to set block-size to %d\n",
			       __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
			goto out;
		}

	} else {
		if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
			ret = mmc_cmdq_halt(host, true);
			if (ret) {
				pr_err("%s: halt: failed: %d\n",
				       mmc_hostname(host), ret);
				goto out;
			}
		}
	}

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_CMDQ, enable,
			 card->ext_csd.generic_cmd6_time);
	if (ret) {
		pr_err("%s: cmdq mode %sable failed %d\n",
		       md->disk->disk_name, enable ? "en" : "dis", ret);
		goto out;
	}

	if (enable)
		mmc_card_set_cmdq(card);
	else
		mmc_card_clr_cmdq(card);
out:
	return ret;
}

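/*
 * Select the (boot/RPMB/GP/user) partition that md describes by programming
 * EXT_CSD PART_CONFIG, pausing re-tuning around RPMB accesses and dropping
 * out of command-queue mode for non-user partitions.
 */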
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if ((main_md->part_curr == md->part_type) &&
	    (card->part_curr == md->part_type))
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type) {
			/* disable CQ mode for non-user data partitions */
			ret = mmc_blk_cmdq_switch(card, md, false);
			if (ret)
				return ret;
		}

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;
		card->part_curr = md->part_type;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}

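/*
 * Ask an SD card how many blocks were successfully written before a failure
 * (ACMD22, SEND_NUM_WR_BLKS); returns (u32)-1 if the value cannot be read.
 */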
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

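/*
 * Read the card's status register with CMD13 (SEND_STATUS), retrying the
 * command up to 'retries' times.
 */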
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

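/*
 * Poll the card until it reports READY_FOR_DATA and has left the programming
 * state, or until timeout_ms expires; R1 error bits seen along the way are
 * reported through *gen_err.
 */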
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001471static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
Ulf Hansson95a91292014-01-29 13:11:27 +01001472 bool hw_busy_detect, struct request *req, int *gen_err)
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001473{
1474 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1475 int err = 0;
1476 u32 status;
1477
1478 do {
1479 err = get_card_status(card, &status, 5);
1480 if (err) {
1481 pr_err("%s: error %d requesting status\n",
1482 req->rq_disk->disk_name, err);
1483 return err;
1484 }
1485
1486 if (status & R1_ERROR) {
1487 pr_err("%s: %s: error sending status cmd, status %#x\n",
1488 req->rq_disk->disk_name, __func__, status);
1489 *gen_err = 1;
1490 }
1491
Ulf Hansson95a91292014-01-29 13:11:27 +01001492 /* We may rely on the host hw to handle busy detection.*/
1493 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
1494 hw_busy_detect)
1495 break;
1496
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001497 /*
1498 * Timeout if the device never becomes ready for data and never
1499 * leaves the program state.
1500 */
1501 if (time_after(jiffies, timeout)) {
1502 pr_err("%s: Card stuck in programming state! %s %s\n",
1503 mmc_hostname(card->host),
1504 req->rq_disk->disk_name, __func__);
1505 return -ETIMEDOUT;
1506 }
1507
1508 /*
1509 * Some cards mishandle the status bits,
1510 * so make sure to check both the busy
1511 * indication and the card state.
1512 */
1513 } while (!(status & R1_READY_FOR_DATA) ||
1514 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1515
1516 return err;
1517}
1518
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001519static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
1520 struct request *req, int *gen_err, u32 *stop_status)
1521{
1522 struct mmc_host *host = card->host;
1523 struct mmc_command cmd = {0};
1524 int err;
1525 bool use_r1b_resp = rq_data_dir(req) == WRITE;
1526
1527 /*
1528 * Normally we use R1B responses for WRITE, but in cases where the host
1529 * has specified a max_busy_timeout we need to validate it. A failure
1530 * means we need to prevent the host from doing hw busy detection, which
1531 * is done by converting to a R1 response instead.
1532 */
1533 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
1534 use_r1b_resp = false;
1535
1536 cmd.opcode = MMC_STOP_TRANSMISSION;
1537 if (use_r1b_resp) {
1538 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1539 cmd.busy_timeout = timeout_ms;
1540 } else {
1541 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1542 }
1543
1544 err = mmc_wait_for_cmd(host, &cmd, 5);
1545 if (err)
1546 return err;
1547
1548 *stop_status = cmd.resp[0];
1549
1550 /* No need to check card status in case of READ. */
1551 if (rq_data_dir(req) == READ)
1552 return 0;
1553
1554 if (!mmc_host_is_spi(host) &&
1555 (*stop_status & R1_ERROR)) {
1556 pr_err("%s: %s: general error sending stop command, resp %#x\n",
1557 req->rq_disk->disk_name, __func__, *stop_status);
1558 *gen_err = 1;
1559 }
1560
1561 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
1562}
1563
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301564#define ERR_NOMEDIUM 3
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001565#define ERR_RETRY 2
1566#define ERR_ABORT 1
1567#define ERR_CONTINUE 0
1568
1569static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1570 bool status_valid, u32 status)
1571{
1572 switch (error) {
1573 case -EILSEQ:
1574 /* response crc error, retry the r/w cmd */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001575 pr_err_ratelimited(
1576 "%s: response CRC error sending %s command, card status %#x\n",
1577 req->rq_disk->disk_name,
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001578 name, status);
1579 return ERR_RETRY;
1580
1581 case -ETIMEDOUT:
Talel Shenhar0821fe852015-01-28 14:44:57 +02001582 pr_err_ratelimited(
1583 "%s: timed out sending %s command, card status %#x\n",
1584 req->rq_disk->disk_name, name, status);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001585
1586 /* If the status cmd initially failed, retry the r/w cmd */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301587 if (!status_valid) {
Talel Shenhar0821fe852015-01-28 14:44:57 +02001588 pr_err_ratelimited("%s: status not valid, retrying timeout\n",
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301589 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001590 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301591 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001592
1593 /*
 1594 * If it was an r/w cmd CRC error, or an illegal command
 1595 * (e.g., issued in the wrong state), then retry - we should
 1596 * have corrected the state problem above.
1597 */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301598 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
Talel Shenhar0821fe852015-01-28 14:44:57 +02001599 pr_err_ratelimited(
1600 "%s: command error, retrying timeout\n",
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301601 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001602 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301603 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001604
1605 /* Otherwise abort the command */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001606 pr_err_ratelimited(
1607 "%s: not retrying timeout\n",
1608 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001609 return ERR_ABORT;
1610
1611 default:
1612 /* We don't understand the error code the driver gave us */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001613 pr_err_ratelimited(
1614 "%s: unknown error %d sending read/write command, card status %#x\n",
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001615 req->rq_disk->disk_name, error, status);
1616 return ERR_ABORT;
1617 }
1618}
1619
1620/*
1621 * Initial r/w and stop cmd error recovery.
1622 * We don't know whether the card received the r/w cmd or not, so try to
1623 * restore things back to a sane state. Essentially, we do this as follows:
1624 * - Obtain card status. If the first attempt to obtain card status fails,
1625 * the status word will reflect the failed status cmd, not the failed
1626 * r/w cmd. If we fail to obtain card status, it suggests we can no
1627 * longer communicate with the card.
1628 * - Check the card state. If the card received the cmd but there was a
1629 * transient problem with the response, it might still be in a data transfer
1630 * mode. Try to send it a stop command. If this fails, we can't recover.
1631 * - If the r/w cmd failed due to a response CRC error, it was probably
1632 * transient, so retry the cmd.
1633 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1634 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1635 * illegal cmd, retry.
1636 * Otherwise we don't understand what happened, so abort.
1637 */
1638static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001639 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001640{
1641 bool prev_cmd_status_valid = true;
1642 u32 status, stop_status = 0;
1643 int err, retry;
1644
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301645 if (mmc_card_removed(card))
1646 return ERR_NOMEDIUM;
1647
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001648 /*
1649 * Try to get card status which indicates both the card state
1650 * and why there was no response. If the first attempt fails,
1651 * we can't be sure the returned status is for the r/w command.
1652 */
1653 for (retry = 2; retry >= 0; retry--) {
1654 err = get_card_status(card, &status, 0);
1655 if (!err)
1656 break;
1657
Adrian Hunter6f398ad2015-05-07 13:10:23 +03001658 /* Re-tune if needed */
1659 mmc_retune_recheck(card->host);
1660
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001661 prev_cmd_status_valid = false;
1662 pr_err("%s: error %d sending status command, %sing\n",
1663 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1664 }
1665
1666 /* We couldn't get a response from the card. Give up. */
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301667 if (err) {
1668 /* Check if the card is removed */
1669 if (mmc_detect_card_removed(card->host))
1670 return ERR_NOMEDIUM;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001671 return ERR_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301672 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001673
Adrian Hunter67716322011-08-29 16:42:15 +03001674 /* Flag ECC errors */
1675 if ((status & R1_CARD_ECC_FAILED) ||
1676 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1677 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1678 *ecc_err = 1;
1679
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001680 /* Flag General errors */
1681 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1682 if ((status & R1_ERROR) ||
1683 (brq->stop.resp[0] & R1_ERROR)) {
1684 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1685 req->rq_disk->disk_name, __func__,
1686 brq->stop.resp[0], status);
1687 *gen_err = 1;
1688 }
1689
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001690 /*
1691 * Check the current card state. If it is in some data transfer
1692 * mode, tell it to stop (and hopefully transition back to TRAN.)
1693 */
1694 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1695 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001696 err = send_stop(card,
1697 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1698 req, gen_err, &stop_status);
1699 if (err) {
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001700 pr_err("%s: error %d sending stop command\n",
1701 req->rq_disk->disk_name, err);
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001702 /*
1703 * If the stop cmd also timed out, the card is probably
1704 * not present, so abort. Other errors are bad news too.
1705 */
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001706 return ERR_ABORT;
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001707 }
1708
Adrian Hunter67716322011-08-29 16:42:15 +03001709 if (stop_status & R1_CARD_ECC_FAILED)
1710 *ecc_err = 1;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001711 }
1712
1713 /* Check for set block count errors */
1714 if (brq->sbc.error)
1715 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1716 prev_cmd_status_valid, status);
1717
1718 /* Check for r/w command errors */
1719 if (brq->cmd.error)
1720 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1721 prev_cmd_status_valid, status);
1722
Adrian Hunter67716322011-08-29 16:42:15 +03001723 /* Data errors */
1724 if (!brq->stop.error)
1725 return ERR_CONTINUE;
1726
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001727 /* Now for stop errors. These aren't fatal to the transfer. */
Johan Rudholm5e1344e2014-09-17 09:50:42 +02001728 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001729 req->rq_disk->disk_name, brq->stop.error,
1730 brq->cmd.resp[0], status);
1731
1732 /*
 1733 * Substitute in our own stop status, as this will give the error
 1734 * state which happened during the execution of the r/w command.
1735 */
1736 if (stop_status) {
1737 brq->stop.resp[0] = stop_status;
1738 brq->stop.error = 0;
1739 }
1740 return ERR_CONTINUE;
1741}
1742
Adrian Hunter67716322011-08-29 16:42:15 +03001743static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1744 int type)
1745{
1746 int err;
1747
1748 if (md->reset_done & type)
1749 return -EEXIST;
1750
1751 md->reset_done |= type;
1752 err = mmc_hw_reset(host);
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301753 if (err && err != -EOPNOTSUPP) {
1754 /* We failed to reset so we need to abort the request */
1755 pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
1756 __func__, err);
1757 return -ENODEV;
1758 }
1759
Adrian Hunter67716322011-08-29 16:42:15 +03001760 /* Ensure we switch back to the correct partition */
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301761 if (host->card) {
Ulf Hanssonfc95e302014-10-06 14:34:09 +02001762 struct mmc_blk_data *main_md =
1763 dev_get_drvdata(&host->card->dev);
Adrian Hunter67716322011-08-29 16:42:15 +03001764 int part_err;
1765
1766 main_md->part_curr = main_md->part_type;
1767 part_err = mmc_blk_part_switch(host->card, md);
1768 if (part_err) {
1769 /*
1770 * We have failed to get back into the correct
1771 * partition, so we need to abort the whole request.
1772 */
1773 return -ENODEV;
1774 }
1775 }
1776 return err;
1777}
1778
1779static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1780{
1781 md->reset_done &= ~type;
1782}
1783
Chuanxiao Dong4e93b9a2014-08-12 12:01:30 +08001784int mmc_access_rpmb(struct mmc_queue *mq)
1785{
1786 struct mmc_blk_data *md = mq->data;
1787 /*
 1788 * If this is an RPMB partition access, return true
1789 */
1790 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1791 return true;
1792
1793 return false;
1794}
1795
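/*
 * Prepare a DCMD (direct command) slot on the command queue for a
 * discard/erase request: mark the request's tag as active, flag the
 * context as DCMD-active and return the cmdq_req prepared by
 * mmc_cmdq_prep_dcmd() with the QBR flag set.
 */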
Sahitya Tummala9433a132015-06-09 09:38:36 +05301796static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
1797 struct request *req)
1798{
1799 struct mmc_blk_data *md = mq->data;
1800 struct mmc_card *card = md->queue.card;
1801 struct mmc_host *host = card->host;
1802 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1803 struct mmc_cmdq_req *cmdq_req;
1804 struct mmc_queue_req *active_mqrq;
1805
1806 BUG_ON(req->tag > card->ext_csd.cmdq_depth);
1807 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
1808
1809 set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1810
1811 active_mqrq = &mq->mqrq_cmdq[req->tag];
1812 active_mqrq->req = req;
1813
1814 cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
1815 cmdq_req->cmdq_req_flags |= QBR;
1816 cmdq_req->mrq.cmd = &cmdq_req->cmd;
1817 cmdq_req->tag = req->tag;
1818 return cmdq_req;
1819}
1820
1821static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
1822 struct request *req)
1823{
1824 struct mmc_blk_data *md = mq->data;
1825 struct mmc_card *card = md->queue.card;
1826 struct mmc_cmdq_req *cmdq_req = NULL;
1827 struct mmc_host *host = card->host;
1828 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1829 unsigned int from, nr, arg;
1830 int err = 0;
1831
1832 if (!mmc_can_erase(card)) {
1833 err = -EOPNOTSUPP;
1834 goto out;
1835 }
1836
1837 from = blk_rq_pos(req);
1838 nr = blk_rq_sectors(req);
1839
1840 if (mmc_can_discard(card))
1841 arg = MMC_DISCARD_ARG;
1842 else if (mmc_can_trim(card))
1843 arg = MMC_TRIM_ARG;
1844 else
1845 arg = MMC_ERASE_ARG;
1846
1847 cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
1848 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1849 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1850 EXT_CSD_CMD_SET_NORMAL,
1851 INAND_CMD38_ARG_EXT_CSD,
1852 arg == MMC_TRIM_ARG ?
1853 INAND_CMD38_ARG_TRIM :
1854 INAND_CMD38_ARG_ERASE,
1855 0, true, false);
1856 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1857 if (err)
1858 goto clear_dcmd;
1859 }
1860 err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
1861clear_dcmd:
1862 /* clear pending request */
1863 if (cmdq_req) {
1864 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
1865 &ctx_info->active_reqs));
1866 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1867 }
1868out:
1869 blk_end_request(req, err, blk_rq_bytes(req));
1870
1871 if (test_and_clear_bit(0, &ctx_info->req_starved))
1872 blk_run_queue(mq->queue);
1873 mmc_release_host(host);
1874 return err ? 1 : 0;
1875}
1876
Adrian Hunterbd788c92010-08-11 14:17:47 -07001877static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1878{
1879 struct mmc_blk_data *md = mq->data;
1880 struct mmc_card *card = md->queue.card;
1881 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001882 int err = 0, type = MMC_BLK_DISCARD;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001883
Adrian Hunterbd788c92010-08-11 14:17:47 -07001884 if (!mmc_can_erase(card)) {
1885 err = -EOPNOTSUPP;
1886 goto out;
1887 }
1888
1889 from = blk_rq_pos(req);
1890 nr = blk_rq_sectors(req);
1891
Kyungmin Parkb3bf9152011-10-18 09:34:04 +09001892 if (mmc_can_discard(card))
1893 arg = MMC_DISCARD_ARG;
1894 else if (mmc_can_trim(card))
Adrian Hunterbd788c92010-08-11 14:17:47 -07001895 arg = MMC_TRIM_ARG;
1896 else
1897 arg = MMC_ERASE_ARG;
Adrian Hunter67716322011-08-29 16:42:15 +03001898retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001899 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1900 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1901 INAND_CMD38_ARG_EXT_CSD,
1902 arg == MMC_TRIM_ARG ?
1903 INAND_CMD38_ARG_TRIM :
1904 INAND_CMD38_ARG_ERASE,
1905 0);
1906 if (err)
1907 goto out;
1908 }
Adrian Hunterbd788c92010-08-11 14:17:47 -07001909 err = mmc_erase(card, from, nr, arg);
1910out:
Adrian Hunter67716322011-08-29 16:42:15 +03001911 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1912 goto retry;
1913 if (!err)
1914 mmc_blk_reset_success(md, type);
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301915 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunterbd788c92010-08-11 14:17:47 -07001916
Adrian Hunterbd788c92010-08-11 14:17:47 -07001917 return err ? 0 : 1;
1918}
1919
Sahitya Tummala9433a132015-06-09 09:38:36 +05301920static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
1921 struct request *req)
1922{
1923 struct mmc_blk_data *md = mq->data;
1924 struct mmc_card *card = md->queue.card;
1925 struct mmc_cmdq_req *cmdq_req = NULL;
1926 unsigned int from, nr, arg;
1927 struct mmc_host *host = card->host;
1928 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
1929 int err = 0;
1930
1931 if (!(mmc_can_secure_erase_trim(card))) {
1932 err = -EOPNOTSUPP;
1933 goto out;
1934 }
1935
1936 from = blk_rq_pos(req);
1937 nr = blk_rq_sectors(req);
1938
1939 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1940 arg = MMC_SECURE_TRIM1_ARG;
1941 else
1942 arg = MMC_SECURE_ERASE_ARG;
1943
1944 cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
1945 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1946 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1947 EXT_CSD_CMD_SET_NORMAL,
1948 INAND_CMD38_ARG_EXT_CSD,
1949 arg == MMC_SECURE_TRIM1_ARG ?
1950 INAND_CMD38_ARG_SECTRIM1 :
1951 INAND_CMD38_ARG_SECERASE,
1952 0, true, false);
1953 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1954 if (err)
1955 goto clear_dcmd;
1956 }
1957
1958 err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
1959 if (err)
1960 goto clear_dcmd;
1961
1962 if (arg == MMC_SECURE_TRIM1_ARG) {
1963 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1964 __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
1965 EXT_CSD_CMD_SET_NORMAL,
1966 INAND_CMD38_ARG_EXT_CSD,
1967 INAND_CMD38_ARG_SECTRIM2,
1968 0, true, false);
1969 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
1970 if (err)
1971 goto clear_dcmd;
1972 }
1973
1974 err = mmc_cmdq_erase(cmdq_req, card, from, nr,
1975 MMC_SECURE_TRIM2_ARG);
1976 }
1977clear_dcmd:
1978 /* clear pending request */
1979 if (cmdq_req) {
1980 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
1981 &ctx_info->active_reqs));
1982 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
1983 }
1984out:
1985 blk_end_request(req, err, blk_rq_bytes(req));
1986
1987 if (test_and_clear_bit(0, &ctx_info->req_starved))
1988 blk_run_queue(mq->queue);
1989 mmc_release_host(host);
1990 return err ? 1 : 0;
1991}
1992
Adrian Hunter49804542010-08-11 14:17:50 -07001993static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1994 struct request *req)
1995{
1996 struct mmc_blk_data *md = mq->data;
1997 struct mmc_card *card = md->queue.card;
Maya Erez775a9362013-04-18 15:41:55 +03001998 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001999 int err = 0, type = MMC_BLK_SECDISCARD;
Adrian Hunter49804542010-08-11 14:17:50 -07002000
Maya Erez775a9362013-04-18 15:41:55 +03002001 if (!(mmc_can_secure_erase_trim(card))) {
Adrian Hunter49804542010-08-11 14:17:50 -07002002 err = -EOPNOTSUPP;
2003 goto out;
2004 }
2005
2006 from = blk_rq_pos(req);
2007 nr = blk_rq_sectors(req);
2008
Maya Erez775a9362013-04-18 15:41:55 +03002009 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
2010 arg = MMC_SECURE_TRIM1_ARG;
2011 else
2012 arg = MMC_SECURE_ERASE_ARG;
Adrian Hunter28302812012-04-05 14:45:48 +03002013
Adrian Hunter67716322011-08-29 16:42:15 +03002014retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002015 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2016 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2017 INAND_CMD38_ARG_EXT_CSD,
2018 arg == MMC_SECURE_TRIM1_ARG ?
2019 INAND_CMD38_ARG_SECTRIM1 :
2020 INAND_CMD38_ARG_SECERASE,
2021 0);
2022 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03002023 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002024 }
Adrian Hunter28302812012-04-05 14:45:48 +03002025
Adrian Hunter49804542010-08-11 14:17:50 -07002026 err = mmc_erase(card, from, nr, arg);
Adrian Hunter28302812012-04-05 14:45:48 +03002027 if (err == -EIO)
2028 goto out_retry;
2029 if (err)
2030 goto out;
2031
2032 if (arg == MMC_SECURE_TRIM1_ARG) {
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002033 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2034 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2035 INAND_CMD38_ARG_EXT_CSD,
2036 INAND_CMD38_ARG_SECTRIM2,
2037 0);
2038 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03002039 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002040 }
Adrian Hunter28302812012-04-05 14:45:48 +03002041
Adrian Hunter49804542010-08-11 14:17:50 -07002042 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
Adrian Hunter28302812012-04-05 14:45:48 +03002043 if (err == -EIO)
2044 goto out_retry;
2045 if (err)
2046 goto out;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05002047 }
Adrian Hunter28302812012-04-05 14:45:48 +03002048
Adrian Hunter28302812012-04-05 14:45:48 +03002049out_retry:
2050 if (err && !mmc_blk_reset(md, card->host, type))
Adrian Hunter67716322011-08-29 16:42:15 +03002051 goto retry;
2052 if (!err)
2053 mmc_blk_reset_success(md, type);
Adrian Hunter28302812012-04-05 14:45:48 +03002054out:
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302055 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunter49804542010-08-11 14:17:50 -07002056
Adrian Hunter49804542010-08-11 14:17:50 -07002057 return err ? 0 : 1;
2058}
2059
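/*
 * Handle flush and barrier requests. REQ_BARRIER uses a cache barrier
 * when the card does not already flush in FIFO order, with a full cache
 * flush as the fallback; REQ_OP_FLUSH always flushes the cache. A
 * -ENODEV result triggers a card reset attempt, and any remaining error
 * is reported to the upper layers as -EIO.
 */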
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002060static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
2061{
2062 struct mmc_blk_data *md = mq->data;
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002063 struct mmc_card *card = md->queue.card;
2064 int ret = 0;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002065
Sahitya Tummala61868a42015-05-28 16:54:19 +05302066 if (!req)
2067 return 0;
2068
2069 if (req->cmd_flags & REQ_BARRIER) {
2070 /*
2071 * If eMMC cache flush policy is set to 1, then the device
2072 * shall flush the requests in First-In-First-Out (FIFO) order.
2073 * In this case, as per spec, the host must not send any cache
2074 * barrier requests as they are redundant and add unnecessary
2075 * overhead to both device and host.
2076 */
2077 if (card->ext_csd.cache_flush_policy & 1)
2078 goto end_req;
2079
2080 /*
2081 * In case barrier is not supported or enabled in the device,
2082 * use flush as a fallback option.
2083 */
2084 ret = mmc_cache_barrier(card);
2085 if (ret)
2086 ret = mmc_flush_cache(card);
2087 } else if (req_op(req) == REQ_OP_FLUSH) {
2088 ret = mmc_flush_cache(card);
2089 }
Talel Shenhar8a8e3b42015-02-11 12:58:16 +02002090 if (ret == -ENODEV) {
2091 pr_err("%s: %s: restart mmc card",
2092 req->rq_disk->disk_name, __func__);
2093 if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
2094 pr_err("%s: %s: fail to restart mmc",
2095 req->rq_disk->disk_name, __func__);
2096 else
2097 mmc_blk_reset_success(md, MMC_BLK_FLUSH);
2098 }
2099
2100 if (ret) {
2101 pr_err("%s: %s: notify flush error to upper layers",
2102 req->rq_disk->disk_name, __func__);
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002103 ret = -EIO;
Talel Shenhar8a8e3b42015-02-11 12:58:16 +02002104 }
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002105
Mark Salyzyn6904e432016-01-28 11:12:25 -08002106#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2107 else if (atomic_read(&mq->cache_size)) {
2108 long used = mmc_blk_cache_used(mq, jiffies);
2109
2110 if (used) {
2111 int speed = atomic_read(&mq->max_write_speed);
2112
2113 if (speed_valid(speed)) {
2114 unsigned long msecs = jiffies_to_msecs(
2115 size_and_speed_to_jiffies(
2116 used, speed));
2117 if (msecs)
2118 msleep(msecs);
2119 }
2120 }
2121 }
2122#endif
Sahitya Tummala61868a42015-05-28 16:54:19 +05302123end_req:
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302124 blk_end_request_all(req, ret);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002125
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002126 return ret ? 0 : 1;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002127}
2128
2129/*
2130 * Reformat current write as a reliable write, supporting
2131 * both legacy and the enhanced reliable write MMC cards.
2132 * In each transfer we'll handle only as much as a single
 2133 * reliable write can handle, thus finishing the request in
2134 * partial completions.
2135 */
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002136static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
2137 struct mmc_card *card,
2138 struct request *req)
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002139{
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002140 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
2141 /* Legacy mode imposes restrictions on transfers. */
2142 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
2143 brq->data.blocks = 1;
2144
2145 if (brq->data.blocks > card->ext_csd.rel_sectors)
2146 brq->data.blocks = card->ext_csd.rel_sectors;
2147 else if (brq->data.blocks < card->ext_csd.rel_sectors)
2148 brq->data.blocks = 1;
2149 }
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002150}
2151
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002152#define CMD_ERRORS \
2153 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
2154 R1_ADDRESS_ERROR | /* Misaligned address */ \
2155 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
2156 R1_WP_VIOLATION | /* Tried to write to protected block */ \
2157 R1_CC_ERROR | /* Card controller error */ \
2158 R1_ERROR) /* General/unknown error */
2159
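/*
 * Evaluate a completed r/w request and map the result to an MMC_BLK_*
 * status: SUCCESS or PARTIAL when (some) data was transferred, RETRY
 * for transient failures, ABORT or NOMEDIUM when recovery is hopeless,
 * and CMD/DATA/ECC errors for the corresponding failure classes.
 */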
Per Forlinee8a43a2011-07-01 18:55:33 +02002160static int mmc_blk_err_check(struct mmc_card *card,
2161 struct mmc_async_req *areq)
Per Forlind78d4a82011-07-01 18:55:30 +02002162{
Per Forlinee8a43a2011-07-01 18:55:33 +02002163 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
2164 mmc_active);
2165 struct mmc_blk_request *brq = &mq_mrq->brq;
2166 struct request *req = mq_mrq->req;
Adrian Hunterb8360a42015-05-07 13:10:24 +03002167 int need_retune = card->host->need_retune;
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002168 int ecc_err = 0, gen_err = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02002169
2170 /*
2171 * sbc.error indicates a problem with the set block count
2172 * command. No data will have been transferred.
2173 *
2174 * cmd.error indicates a problem with the r/w command. No
2175 * data will have been transferred.
2176 *
2177 * stop.error indicates a problem with the stop command. Data
2178 * may have been transferred, or may still be transferring.
2179 */
Adrian Hunter67716322011-08-29 16:42:15 +03002180 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
2181 brq->data.error) {
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002182 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
Per Forlind78d4a82011-07-01 18:55:30 +02002183 case ERR_RETRY:
2184 return MMC_BLK_RETRY;
2185 case ERR_ABORT:
2186 return MMC_BLK_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05302187 case ERR_NOMEDIUM:
2188 return MMC_BLK_NOMEDIUM;
Per Forlind78d4a82011-07-01 18:55:30 +02002189 case ERR_CONTINUE:
2190 break;
2191 }
2192 }
2193
2194 /*
2195 * Check for errors relating to the execution of the
2196 * initial command - such as address errors. No data
2197 * has been transferred.
2198 */
2199 if (brq->cmd.resp[0] & CMD_ERRORS) {
2200 pr_err("%s: r/w command failed, status = %#x\n",
2201 req->rq_disk->disk_name, brq->cmd.resp[0]);
2202 return MMC_BLK_ABORT;
2203 }
2204
2205 /*
2206 * Everything else is either success, or a data error of some
2207 * kind. If it was a write, we may have transitioned to
 2208 * program mode, in which case we have to wait for it to complete.
2209 */
2210 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
Ulf Hanssonc49433f2014-01-29 11:01:55 +01002211 int err;
Trey Ramsay8fee4762012-11-16 09:31:41 -06002212
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002213 /* Check stop command response */
2214 if (brq->stop.resp[0] & R1_ERROR) {
2215 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
2216 req->rq_disk->disk_name, __func__,
2217 brq->stop.resp[0]);
2218 gen_err = 1;
2219 }
2220
Ulf Hansson95a91292014-01-29 13:11:27 +01002221 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
2222 &gen_err);
Ulf Hanssonc49433f2014-01-29 11:01:55 +01002223 if (err)
2224 return MMC_BLK_CMD_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02002225 }
2226
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09002227	/* If a general error occurs, retry the write operation. */
2228 if (gen_err) {
2229 pr_warn("%s: retrying write for general error\n",
2230 req->rq_disk->disk_name);
2231 return MMC_BLK_RETRY;
2232 }
2233
Per Forlind78d4a82011-07-01 18:55:30 +02002234 if (brq->data.error) {
Adrian Hunterb8360a42015-05-07 13:10:24 +03002235 if (need_retune && !brq->retune_retry_done) {
Russell King09faf612016-01-29 09:44:00 +00002236 pr_debug("%s: retrying because a re-tune was needed\n",
2237 req->rq_disk->disk_name);
Adrian Hunterb8360a42015-05-07 13:10:24 +03002238 brq->retune_retry_done = 1;
2239 return MMC_BLK_RETRY;
2240 }
Per Forlind78d4a82011-07-01 18:55:30 +02002241 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
2242 req->rq_disk->disk_name, brq->data.error,
2243 (unsigned)blk_rq_pos(req),
2244 (unsigned)blk_rq_sectors(req),
2245 brq->cmd.resp[0], brq->stop.resp[0]);
2246
2247 if (rq_data_dir(req) == READ) {
Adrian Hunter67716322011-08-29 16:42:15 +03002248 if (ecc_err)
2249 return MMC_BLK_ECC_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02002250 return MMC_BLK_DATA_ERR;
2251 } else {
2252 return MMC_BLK_CMD_ERR;
2253 }
2254 }
2255
Adrian Hunter67716322011-08-29 16:42:15 +03002256 if (!brq->data.bytes_xfered)
2257 return MMC_BLK_RETRY;
Per Forlind78d4a82011-07-01 18:55:30 +02002258
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002259 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
2260 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
2261 return MMC_BLK_PARTIAL;
2262 else
2263 return MMC_BLK_SUCCESS;
2264 }
2265
Adrian Hunter67716322011-08-29 16:42:15 +03002266 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
2267 return MMC_BLK_PARTIAL;
2268
2269 return MMC_BLK_SUCCESS;
Per Forlind78d4a82011-07-01 18:55:30 +02002270}
2271
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002272static int mmc_blk_packed_err_check(struct mmc_card *card,
2273 struct mmc_async_req *areq)
2274{
2275 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
2276 mmc_active);
2277 struct request *req = mq_rq->req;
2278 struct mmc_packed *packed = mq_rq->packed;
2279 int err, check, status;
2280 u8 *ext_csd;
2281
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002282 packed->retries--;
2283 check = mmc_blk_err_check(card, areq);
2284 err = get_card_status(card, &status, 0);
2285 if (err) {
2286 pr_err("%s: error %d sending status command\n",
2287 req->rq_disk->disk_name, err);
2288 return MMC_BLK_ABORT;
2289 }
2290
2291 if (status & R1_EXCEPTION_EVENT) {
Ulf Hansson86817ff2014-10-17 11:39:05 +02002292 err = mmc_get_ext_csd(card, &ext_csd);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002293 if (err) {
2294 pr_err("%s: error %d sending ext_csd\n",
2295 req->rq_disk->disk_name, err);
Ulf Hansson86817ff2014-10-17 11:39:05 +02002296 return MMC_BLK_ABORT;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002297 }
2298
2299 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
2300 EXT_CSD_PACKED_FAILURE) &&
2301 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2302 EXT_CSD_PACKED_GENERIC_ERROR)) {
2303 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2304 EXT_CSD_PACKED_INDEXED_ERROR) {
2305 packed->idx_failure =
2306 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
2307 check = MMC_BLK_PARTIAL;
2308 }
2309 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
2310 "failure index: %d\n",
2311 req->rq_disk->disk_name, packed->nr_entries,
2312 packed->blocks, packed->idx_failure);
2313 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002314 kfree(ext_csd);
2315 }
2316
2317 return check;
2318}
2319
Per Forlin54d49d72011-07-01 18:55:29 +02002320static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
2321 struct mmc_card *card,
2322 int disable_multi,
2323 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324{
Per Forlin54d49d72011-07-01 18:55:29 +02002325 u32 readcmd, writecmd;
2326 struct mmc_blk_request *brq = &mqrq->brq;
2327 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 struct mmc_blk_data *md = mq->data;
Saugata Das42659002011-12-21 13:09:17 +05302329 bool do_data_tag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002331 /*
2332 * Reliable writes are used to implement Forced Unit Access and
Luca Porziod3df0462015-11-06 15:12:26 +00002333 * are supported only on MMCs.
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002334 */
Luca Porziod3df0462015-11-06 15:12:26 +00002335 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002336 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002337 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002338
Per Forlin54d49d72011-07-01 18:55:29 +02002339 memset(brq, 0, sizeof(struct mmc_blk_request));
2340 brq->mrq.cmd = &brq->cmd;
2341 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342
Per Forlin54d49d72011-07-01 18:55:29 +02002343 brq->cmd.arg = blk_rq_pos(req);
2344 if (!mmc_card_blockaddr(card))
2345 brq->cmd.arg <<= 9;
2346 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2347 brq->data.blksz = 512;
2348 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2349 brq->stop.arg = 0;
Per Forlin54d49d72011-07-01 18:55:29 +02002350 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Asutosh Dasf0665412012-07-27 18:10:19 +05302352 brq->data.fault_injected = false;
Per Forlin54d49d72011-07-01 18:55:29 +02002353 /*
2354 * The block layer doesn't support all sector count
 2355 * restrictions, so we need to be prepared for requests that
 2356 * are too big.
2357 */
2358 if (brq->data.blocks > card->host->max_blk_count)
2359 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002361 if (brq->data.blocks > 1) {
2362 /*
2363 * After a read error, we redo the request one sector
2364 * at a time in order to accurately determine which
2365 * sectors can be read successfully.
2366 */
2367 if (disable_multi)
2368 brq->data.blocks = 1;
2369
Kuninori Morimoto2e47e842014-09-02 19:08:53 -07002370 /*
2371 * Some controllers have HW issues while operating
2372 * in multiple I/O mode
2373 */
2374 if (card->host->ops->multi_io_quirk)
2375 brq->data.blocks = card->host->ops->multi_io_quirk(card,
2376 (rq_data_dir(req) == READ) ?
2377 MMC_DATA_READ : MMC_DATA_WRITE,
2378 brq->data.blocks);
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002379 }
Per Forlin54d49d72011-07-01 18:55:29 +02002380
2381 if (brq->data.blocks > 1 || do_rel_wr) {
2382 /* SPI multiblock writes terminate using a special
2383 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +02002384 */
Per Forlin54d49d72011-07-01 18:55:29 +02002385 if (!mmc_host_is_spi(card->host) ||
2386 rq_data_dir(req) == READ)
2387 brq->mrq.stop = &brq->stop;
2388 readcmd = MMC_READ_MULTIPLE_BLOCK;
2389 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
2390 } else {
2391 brq->mrq.stop = NULL;
2392 readcmd = MMC_READ_SINGLE_BLOCK;
2393 writecmd = MMC_WRITE_BLOCK;
2394 }
2395 if (rq_data_dir(req) == READ) {
2396 brq->cmd.opcode = readcmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002397 brq->data.flags = MMC_DATA_READ;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002398 if (brq->mrq.stop)
2399 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
2400 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002401 } else {
2402 brq->cmd.opcode = writecmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002403 brq->data.flags = MMC_DATA_WRITE;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002404 if (brq->mrq.stop)
2405 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
2406 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002407 }
Pierre Ossman548d2de2009-04-10 17:52:57 +02002408
Per Forlin54d49d72011-07-01 18:55:29 +02002409 if (do_rel_wr)
2410 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +01002411
Per Forlin54d49d72011-07-01 18:55:29 +02002412 /*
Saugata Das42659002011-12-21 13:09:17 +05302413	 * The data tag is used only when writing meta data, to speed
 2414 * up the write and any subsequent read of this meta data.
2415 */
2416 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2417 (req->cmd_flags & REQ_META) &&
2418 (rq_data_dir(req) == WRITE) &&
2419 ((brq->data.blocks * brq->data.blksz) >=
2420 card->ext_csd.data_tag_unit_size);
2421
2422 /*
Per Forlin54d49d72011-07-01 18:55:29 +02002423 * Pre-defined multi-block transfers are preferable to
2424 * open ended-ones (and necessary for reliable writes).
2425 * However, it is not sufficient to just send CMD23,
2426 * and avoid the final CMD12, as on an error condition
2427 * CMD12 (stop) needs to be sent anyway. This, coupled
2428 * with Auto-CMD23 enhancements provided by some
2429 * hosts, means that the complexity of dealing
2430 * with this is best left to the host. If CMD23 is
2431 * supported by card and host, we'll fill sbc in and let
2432 * the host deal with handling it correctly. This means
2433 * that for hosts that don't expose MMC_CAP_CMD23, no
2434 * change of behavior will be observed.
2435 *
2436 * N.B: Some MMC cards experience perf degradation.
2437 * We'll avoid using CMD23-bounded multiblock writes for
2438 * these, while retaining features like reliable writes.
2439 */
Saugata Das42659002011-12-21 13:09:17 +05302440 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
2441 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
2442 do_data_tag)) {
Per Forlin54d49d72011-07-01 18:55:29 +02002443 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2444 brq->sbc.arg = brq->data.blocks |
Saugata Das42659002011-12-21 13:09:17 +05302445 (do_rel_wr ? (1 << 31) : 0) |
2446 (do_data_tag ? (1 << 29) : 0);
Per Forlin54d49d72011-07-01 18:55:29 +02002447 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2448 brq->mrq.sbc = &brq->sbc;
2449 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002450
Per Forlin54d49d72011-07-01 18:55:29 +02002451 mmc_set_data_timeout(&brq->data, card);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002452
Per Forlin54d49d72011-07-01 18:55:29 +02002453 brq->data.sg = mqrq->sg;
2454 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002455
Per Forlin54d49d72011-07-01 18:55:29 +02002456 /*
2457 * Adjust the sg list so it is the same size as the
2458 * request.
2459 */
2460 if (brq->data.blocks != blk_rq_sectors(req)) {
2461 int i, data_size = brq->data.blocks << 9;
2462 struct scatterlist *sg;
Pierre Ossmanb146d262007-07-24 19:16:54 +02002463
Per Forlin54d49d72011-07-01 18:55:29 +02002464 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
2465 data_size -= sg->length;
2466 if (data_size <= 0) {
2467 sg->length += data_size;
2468 i++;
2469 break;
Adrian Hunter6a79e392008-12-31 18:21:17 +01002470 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002471 }
Per Forlin54d49d72011-07-01 18:55:29 +02002472 brq->data.sg_len = i;
2473 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002474
Per Forlinee8a43a2011-07-01 18:55:33 +02002475 mqrq->mmc_active.mrq = &brq->mrq;
Sahitya Tummalac44de842015-05-08 11:12:30 +05302476 mqrq->mmc_active.mrq->req = mqrq->req;
Per Forlinee8a43a2011-07-01 18:55:33 +02002477 mqrq->mmc_active.err_check = mmc_blk_err_check;
2478
Per Forlin54d49d72011-07-01 18:55:29 +02002479 mmc_queue_bounce_pre(mqrq);
2480}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481
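/*
 * Number of scatterlist segments needed for the packed command header,
 * e.g. a 4096-byte header fits in one 4096-byte segment but needs eight
 * segments when the queue's max segment size is 512 bytes.
 */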
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002482static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
2483 struct mmc_card *card)
2484{
2485 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
2486 unsigned int max_seg_sz = queue_max_segment_size(q);
2487 unsigned int len, nr_segs = 0;
2488
2489 do {
2490 len = min(hdr_sz, max_seg_sz);
2491 hdr_sz -= len;
2492 nr_segs++;
2493 } while (hdr_sz);
2494
2495 return nr_segs;
2496}
2497
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002498/**
2499 * mmc_blk_disable_wr_packing() - disables packing mode
2500 * @mq: MMC queue.
2501 *
2502 */
2503void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
2504{
2505 if (mq) {
2506 mq->wr_packing_enabled = false;
2507 mq->num_of_potential_packed_wr_reqs = 0;
2508 }
2509}
2510EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
2511
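/*
 * Adapt the write packing trigger: track a running mean of the recent
 * "potential packed writes" values and lower the trigger when the
 * current potential is above the mean (expect more writes, pack sooner)
 * or raise it when below (pack later). The result is clamped between
 * PCKD_TRGR_LOWER_BOUND and 75% of the card's max_packed_writes.
 */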
Lee Susman841fd132013-04-23 17:59:26 +03002512static int get_packed_trigger(int potential, struct mmc_card *card,
2513 struct request *req, int curr_trigger)
2514{
2515 static int num_mean_elements = 1;
2516 static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2517 unsigned int trigger = curr_trigger;
2518 unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
2519
2520 /* scale down the upper bound to 75% */
2521 pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
2522
2523 /*
2524 * since the most common calls for this function are with small
2525 * potential write values and since we don't want these calls to affect
2526 * the packed trigger, set a lower bound and ignore calls with
2527 * potential lower than that bound
2528 */
2529 if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
2530 return trigger;
2531
2532 /*
2533 * this is to prevent integer overflow in the following calculation:
2534 * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
2535 */
2536 if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
2537 num_mean_elements = 1;
2538 mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2539 }
2540
2541 /*
2542 * get next mean value based on previous mean value and current
2543 * potential packed writes. Calculation is as follows:
2544 * mean_pot[i+1] =
2545 * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
2546 */
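	/*
	 * Illustrative numbers (ignoring the precision-multiplier and
	 * rounding tweaks below): with mean_pot = 4, num_mean_elem = 2 and
	 * potential = 10, the next mean is (4 * 2 + 10) / 3 = 6.
	 */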
2547 mean_potential *= num_mean_elements;
2548 /*
2549 * add num_mean_elements so that the division of two integers doesn't
2550 * lower mean_potential too much
2551 */
2552 if (potential > mean_potential)
2553 mean_potential += num_mean_elements;
2554 mean_potential += potential;
2555 /* this is for gaining more precision when dividing two integers */
2556 mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
2557 /* this completes the mean calculation */
2558 mean_potential /= ++num_mean_elements;
2559 mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
2560
2561 /*
 2562 * If the current potential packed writes value is greater than the mean
 2563 * potential, the heuristic is that the following workload will contain
 2564 * many write requests, therefore we lower the packed trigger. In the
 2565 * opposite case we want to increase the trigger in order to get fewer
 2566 * packing events.
2567 */
2568 if (potential >= mean_potential)
2569 trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
2570 PCKD_TRGR_LOWER_BOUND : trigger - 1;
2571 else
2572 trigger = (trigger >= pckd_trgr_upper_bound) ?
2573 pckd_trgr_upper_bound : trigger + 1;
2574
2575 /*
2576 * an urgent read request indicates a packed list being interrupted
2577 * by this read, therefore we aim for less packing, hence the trigger
2578 * gets increased
2579 */
2580 if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
2581 trigger += PCKD_TRGR_URGENT_PENALTY;
2582
2583 return trigger;
2584}
2585
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002586static void mmc_blk_write_packing_control(struct mmc_queue *mq,
2587 struct request *req)
2588{
2589 struct mmc_host *host = mq->card->host;
2590 int data_dir;
2591
2592 if (!(host->caps2 & MMC_CAP2_PACKED_WR))
2593 return;
2594
Maya Erez8e2b3c32012-12-02 13:27:15 +02002595 /* Support for the write packing on eMMC 4.5 or later */
2596 if (mq->card->ext_csd.rev <= 5)
2597 return;
2598
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002599 /*
2600 * In case the packing control is not supported by the host, it should
2601 * not have an effect on the write packing. Therefore we have to enable
2602 * the write packing
2603 */
2604 if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
2605 mq->wr_packing_enabled = true;
2606 return;
2607 }
2608
2609 if (!req || (req && (req->cmd_flags & REQ_PREFLUSH))) {
2610 if (mq->num_of_potential_packed_wr_reqs >
2611 mq->num_wr_reqs_to_start_packing)
2612 mq->wr_packing_enabled = true;
Lee Susman841fd132013-04-23 17:59:26 +03002613 mq->num_wr_reqs_to_start_packing =
2614 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2615 mq->card, req,
2616 mq->num_wr_reqs_to_start_packing);
Tatyana Brokhman843915a2012-10-07 10:26:27 +02002617 mq->num_of_potential_packed_wr_reqs = 0;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002618 return;
2619 }
2620
2621 data_dir = rq_data_dir(req);
2622
2623 if (data_dir == READ) {
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002624 mmc_blk_disable_wr_packing(mq);
Lee Susman841fd132013-04-23 17:59:26 +03002625 mq->num_wr_reqs_to_start_packing =
2626 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2627 mq->card, req,
2628 mq->num_wr_reqs_to_start_packing);
2629 mq->num_of_potential_packed_wr_reqs = 0;
2630 mq->wr_packing_enabled = false;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002631 return;
2632 } else if (data_dir == WRITE) {
2633 mq->num_of_potential_packed_wr_reqs++;
2634 }
2635
2636 if (mq->num_of_potential_packed_wr_reqs >
2637 mq->num_wr_reqs_to_start_packing)
2638 mq->wr_packing_enabled = true;
2639}
2640
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002641struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
2642{
2643 if (!card)
2644 return NULL;
2645
2646 return &card->wr_pack_stats;
2647}
2648EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
2649
2650void mmc_blk_init_packed_statistics(struct mmc_card *card)
2651{
2652 int max_num_of_packed_reqs = 0;
2653
2654 if (!card || !card->wr_pack_stats.packing_events)
2655 return;
2656
2657 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
2658
2659 spin_lock(&card->wr_pack_stats.lock);
2660 memset(card->wr_pack_stats.packing_events, 0,
2661 (max_num_of_packed_reqs + 1) *
2662 sizeof(*card->wr_pack_stats.packing_events));
2663 memset(&card->wr_pack_stats.pack_stop_reason, 0,
2664 sizeof(card->wr_pack_stats.pack_stop_reason));
2665 card->wr_pack_stats.enabled = true;
2666 spin_unlock(&card->wr_pack_stats.lock);
2667}
2668EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
2669
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002670static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
2671{
2672 struct request_queue *q = mq->queue;
2673 struct mmc_card *card = mq->card;
2674 struct request *cur = req, *next = NULL;
2675 struct mmc_blk_data *md = mq->data;
2676 struct mmc_queue_req *mqrq = mq->mqrq_cur;
2677 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
2678 unsigned int req_sectors = 0, phys_segments = 0;
2679 unsigned int max_blk_count, max_phys_segs;
2680 bool put_back = true;
2681 u8 max_packed_rw = 0;
2682 u8 reqs = 0;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002683 struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002684
Shawn Lin96e52da2016-08-26 08:49:55 +08002685 /*
 2686 * We don't need to check packing for any further
 2687 * packed operations, as we set MMC_PACKED_NONE
 2688 * and return zero for reqs if packed is NULL. Also
 2689 * clear the MMC_BLK_PACKED_CMD flag to avoid doing
 2690 * it again when removing the blk req.
2691 */
2692 if (!mqrq->packed) {
2693 md->flags &= (~MMC_BLK_PACKED_CMD);
2694 goto no_packed;
2695 }
2696
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002697 if (!(md->flags & MMC_BLK_PACKED_CMD))
2698 goto no_packed;
2699
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002700 if (!mq->wr_packing_enabled)
2701 goto no_packed;
2702
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002703 if ((rq_data_dir(cur) == WRITE) &&
2704 mmc_host_packed_wr(card->host))
2705 max_packed_rw = card->ext_csd.max_packed_writes;
2706
2707 if (max_packed_rw == 0)
2708 goto no_packed;
2709
2710 if (mmc_req_rel_wr(cur) &&
2711 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
2712 goto no_packed;
2713
2714 if (mmc_large_sector(card) &&
2715 !IS_ALIGNED(blk_rq_sectors(cur), 8))
2716 goto no_packed;
2717
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002718 if (cur->cmd_flags & REQ_FUA)
2719 goto no_packed;
2720
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002721 mmc_blk_clear_packed(mqrq);
2722
2723 max_blk_count = min(card->host->max_blk_count,
2724 card->host->max_req_size >> 9);
2725 if (unlikely(max_blk_count > 0xffff))
2726 max_blk_count = 0xffff;
2727
2728 max_phys_segs = queue_max_segments(q);
2729 req_sectors += blk_rq_sectors(cur);
2730 phys_segments += cur->nr_phys_segments;
2731
2732 if (rq_data_dir(cur) == WRITE) {
2733 req_sectors += mmc_large_sector(card) ? 8 : 1;
2734 phys_segments += mmc_calc_packed_hdr_segs(q, card);
2735 }
2736
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002737 spin_lock(&stats->lock);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002738 do {
2739 if (reqs >= max_packed_rw - 1) {
2740 put_back = false;
2741 break;
2742 }
2743
2744 spin_lock_irq(q->queue_lock);
2745 next = blk_fetch_request(q);
2746 spin_unlock_irq(q->queue_lock);
2747 if (!next) {
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002748 MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002749 put_back = false;
2750 break;
2751 }
2752
2753 if (mmc_large_sector(card) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002754 !IS_ALIGNED(blk_rq_sectors(next), 8)) {
2755 MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002756 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002757 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002758
Mike Christie3a5e02c2016-06-05 14:32:23 -05002759 if (req_op(next) == REQ_OP_DISCARD ||
Adrian Hunter7afafc82016-08-16 10:59:35 +03002760 req_op(next) == REQ_OP_SECURE_ERASE ||
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002761 req_op(next) == REQ_OP_FLUSH) {
2762 if (req_op(next) != REQ_OP_SECURE_ERASE)
2763 MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002764 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002765 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002766
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002767 if (next->cmd_flags & REQ_FUA) {
2768 MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
2769 break;
2770 }
2771
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002772 if (rq_data_dir(cur) != rq_data_dir(next)) {
2773 MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002774 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002775 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002776
2777 if (mmc_req_rel_wr(next) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002778 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
2779 MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002780 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002781 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002782
2783 req_sectors += blk_rq_sectors(next);
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002784 if (req_sectors > max_blk_count) {
2785 if (stats->enabled)
2786 stats->pack_stop_reason[EXCEEDS_SECTORS]++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002787 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002788 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002789
2790 phys_segments += next->nr_phys_segments;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002791 if (phys_segments > max_phys_segs) {
2792 MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002793 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002794 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002795
Maya Erez5a8dae12014-12-04 15:13:59 +02002796 if (mq->no_pack_for_random) {
2797 if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
2798 blk_rq_pos(next)) {
2799 MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
2800 put_back = 1;
2801 break;
2802 }
2803 }
2804
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002805 if (rq_data_dir(next) == WRITE)
2806 mq->num_of_potential_packed_wr_reqs++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002807 list_add_tail(&next->queuelist, &mqrq->packed->list);
2808 cur = next;
2809 reqs++;
2810 } while (1);
2811
2812 if (put_back) {
2813 spin_lock_irq(q->queue_lock);
2814 blk_requeue_request(q, next);
2815 spin_unlock_irq(q->queue_lock);
2816 }
2817
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002818 if (stats->enabled) {
2819 if (reqs + 1 <= card->ext_csd.max_packed_writes)
2820 stats->packing_events[reqs + 1]++;
2821 if (reqs + 1 == max_packed_rw)
2822 MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
2823 }
2824
2825 spin_unlock(&stats->lock);
2826
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002827 if (reqs > 0) {
2828 list_add(&req->queuelist, &mqrq->packed->list);
2829 mqrq->packed->nr_entries = ++reqs;
2830 mqrq->packed->retries = reqs;
2831 return reqs;
2832 }
2833
2834no_packed:
2835 mqrq->cmd_type = MMC_PACKED_NONE;
2836 return 0;
2837}
2838
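/*
 * Build a packed write: the header block (eight blocks on large-sector
 * cards) carries a CMD23 argument and a CMD18/CMD25 address argument
 * for every entry in the packed list, and the whole group (header plus
 * the data of all entries) is sent as one CMD23-bounded CMD25 transfer.
 */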
2839static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
2840 struct mmc_card *card,
2841 struct mmc_queue *mq)
2842{
2843 struct mmc_blk_request *brq = &mqrq->brq;
2844 struct request *req = mqrq->req;
2845 struct request *prq;
2846 struct mmc_blk_data *md = mq->data;
2847 struct mmc_packed *packed = mqrq->packed;
2848 bool do_rel_wr, do_data_tag;
Jiri Slaby3f2d2662016-10-03 10:58:28 +02002849 __le32 *packed_cmd_hdr;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002850 u8 hdr_blocks;
2851 u8 i = 1;
2852
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002853 mqrq->cmd_type = MMC_PACKED_WRITE;
2854 packed->blocks = 0;
2855 packed->idx_failure = MMC_PACKED_NR_IDX;
2856
2857 packed_cmd_hdr = packed->cmd_hdr;
2858 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002859 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
2860 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002861 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
2862
2863 /*
2864 * Argument for each entry of packed group
2865 */
2866 list_for_each_entry(prq, &packed->list, queuelist) {
2867 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
2868 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2869 (prq->cmd_flags & REQ_META) &&
2870 (rq_data_dir(prq) == WRITE) &&
Adrian Hunterd806b462016-06-10 16:22:16 +03002871 blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002872 /* Argument of CMD23 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002873 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002874 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
2875 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002876 blk_rq_sectors(prq));
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002877 /* Argument of CMD18 or CMD25 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002878 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002879 mmc_card_blockaddr(card) ?
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002880 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002881 packed->blocks += blk_rq_sectors(prq);
2882 i++;
2883 }
2884
2885 memset(brq, 0, sizeof(struct mmc_blk_request));
2886 brq->mrq.cmd = &brq->cmd;
2887 brq->mrq.data = &brq->data;
2888 brq->mrq.sbc = &brq->sbc;
2889 brq->mrq.stop = &brq->stop;
2890
2891 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2892 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2893 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2894
2895 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2896 brq->cmd.arg = blk_rq_pos(req);
2897 if (!mmc_card_blockaddr(card))
2898 brq->cmd.arg <<= 9;
2899 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2900
2901 brq->data.blksz = 512;
2902 brq->data.blocks = packed->blocks + hdr_blocks;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002903 brq->data.flags = MMC_DATA_WRITE;
Asutosh Dasf0665412012-07-27 18:10:19 +05302904 brq->data.fault_injected = false;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002905
2906 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2907 brq->stop.arg = 0;
2908 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2909
2910 mmc_set_data_timeout(&brq->data, card);
2911
2912 brq->data.sg = mqrq->sg;
2913 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2914
2915 mqrq->mmc_active.mrq = &brq->mrq;
Tatyana Brokhman71aefb82012-10-09 13:50:56 +02002916
2917 /*
 2918 * This is intended for packed command test usage - in case these
 2919 * functions are not in use, the respective pointers are NULL.
2920 */
2921 if (mq->err_check_fn)
2922 mqrq->mmc_active.err_check = mq->err_check_fn;
2923 else
2924 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2925
2926 if (mq->packed_test_fn)
2927 mq->packed_test_fn(mq->queue, mqrq);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002928
2929 mmc_queue_bounce_pre(mqrq);
2930}
2931
Adrian Hunter67716322011-08-29 16:42:15 +03002932static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2933 struct mmc_blk_request *brq, struct request *req,
2934 int ret)
2935{
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002936 struct mmc_queue_req *mq_rq;
2937 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2938
Adrian Hunter67716322011-08-29 16:42:15 +03002939 /*
2940 * If this is an SD card and we're writing, we can first
2941 * mark the known good sectors as ok.
2942 *
2943 * If the card is not SD, we can still ok written sectors
2944 * as reported by the controller (which might be less than
2945 * the real number of written sectors, but never more).
2946 */
2947 if (mmc_card_sd(card)) {
2948 u32 blocks;
Asutosh Dasf0665412012-07-27 18:10:19 +05302949 if (!brq->data.fault_injected) {
2950 blocks = mmc_sd_num_wr_blocks(card);
2951 if (blocks != (u32)-1)
2952 ret = blk_end_request(req, 0, blocks << 9);
2953 } else
2954 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002955 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002956 if (!mmc_packed_cmd(mq_rq->cmd_type))
2957 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002958 }
2959 return ret;
2960}
2961
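/*
 * Complete the requests of a packed group. Entries before idx_failure
 * are completed successfully; if a failure index is set, the failing
 * request becomes the current request again and 1 is returned so it and
 * the remaining entries can be retried, otherwise everything completes
 * and 0 is returned.
 */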
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002962static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2963{
2964 struct request *prq;
2965 struct mmc_packed *packed = mq_rq->packed;
2966 int idx = packed->idx_failure, i = 0;
2967 int ret = 0;
2968
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002969 while (!list_empty(&packed->list)) {
2970 prq = list_entry_rq(packed->list.next);
2971 if (idx == i) {
2972 /* retry from error index */
2973 packed->nr_entries -= idx;
2974 mq_rq->req = prq;
2975 ret = 1;
2976
2977 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2978 list_del_init(&prq->queuelist);
2979 mmc_blk_clear_packed(mq_rq);
2980 }
2981 return ret;
2982 }
2983 list_del_init(&prq->queuelist);
2984 blk_end_request(prq, 0, blk_rq_bytes(prq));
2985 i++;
2986 }
2987
2988 mmc_blk_clear_packed(mq_rq);
2989 return ret;
2990}
2991
2992static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2993{
2994 struct request *prq;
2995 struct mmc_packed *packed = mq_rq->packed;
2996
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002997 while (!list_empty(&packed->list)) {
2998 prq = list_entry_rq(packed->list.next);
2999 list_del_init(&prq->queuelist);
3000 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
3001 }
3002
3003 mmc_blk_clear_packed(mq_rq);
3004}
3005
3006static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
3007 struct mmc_queue_req *mq_rq)
3008{
3009 struct request *prq;
3010 struct request_queue *q = mq->queue;
3011 struct mmc_packed *packed = mq_rq->packed;
3012
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003013 while (!list_empty(&packed->list)) {
3014 prq = list_entry_rq(packed->list.prev);
3015 if (prq->queuelist.prev != &packed->list) {
3016 list_del_init(&prq->queuelist);
3017 spin_lock_irq(q->queue_lock);
3018 blk_requeue_request(mq->queue, prq);
3019 spin_unlock_irq(q->queue_lock);
3020 } else {
3021 list_del_init(&prq->queuelist);
3022 }
3023 }
3024
3025 mmc_blk_clear_packed(mq_rq);
3026}
3027
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003028static int mmc_blk_cmdq_start_req(struct mmc_host *host,
3029 struct mmc_cmdq_req *cmdq_req)
3030{
3031 struct mmc_request *mrq = &cmdq_req->mrq;
3032
3033 mrq->done = mmc_blk_cmdq_req_done;
3034 return mmc_cmdq_start_req(host, cmdq_req);
3035}
3036
Asutosh Das5238e022015-04-23 16:00:45 +05303037/* prepare for non-data commands */
3038static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
3039 struct mmc_queue_req *mqrq, struct mmc_queue *mq)
3040{
3041 struct request *req = mqrq->req;
3042 struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
3043
3044 memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
3045
3046 cmdq_req->mrq.data = NULL;
3047 cmdq_req->cmd_flags = req->cmd_flags;
3048 cmdq_req->mrq.req = mqrq->req;
3049 req->special = mqrq;
3050 cmdq_req->cmdq_req_flags |= DCMD;
3051 cmdq_req->mrq.cmdq_req = cmdq_req;
3052
3053 return &mqrq->cmdq_req;
3054}
3055
3056
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003057#define IS_RT_CLASS_REQ(x) \
3058 (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
3059
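/*
 * Read/write preparation for command queuing: cmdq_req_flags collect the
 * per-task attributes - DIR for reads, PRIO when the request carries the
 * RT I/O priority class, REL_WR for reliable writes and DAT_TAG for
 * data-tagged writes.  How these flags end up in the controller's task
 * descriptor is host-specific and handled by the host's cmdq
 * implementation, not here.
 */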
3060static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
3061 struct mmc_queue_req *mqrq, struct mmc_queue *mq)
3062{
3063 struct mmc_card *card = mq->card;
3064 struct request *req = mqrq->req;
3065 struct mmc_blk_data *md = mq->data;
3066 bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
3067 bool do_data_tag;
3068 bool read_dir = (rq_data_dir(req) == READ);
3069 bool prio = IS_RT_CLASS_REQ(req);
3070 struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
3071
3072 memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
3073
3074 cmdq_rq->tag = req->tag;
3075 if (read_dir) {
3076 cmdq_rq->cmdq_req_flags |= DIR;
3077 cmdq_rq->data.flags = MMC_DATA_READ;
3078 } else {
3079 cmdq_rq->data.flags = MMC_DATA_WRITE;
3080 }
3081 if (prio)
3082 cmdq_rq->cmdq_req_flags |= PRIO;
3083
3084 if (do_rel_wr)
3085 cmdq_rq->cmdq_req_flags |= REL_WR;
3086
3087 cmdq_rq->data.blocks = blk_rq_sectors(req);
3088 cmdq_rq->blk_addr = blk_rq_pos(req);
3089 cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
3090
3091 mmc_set_data_timeout(&cmdq_rq->data, card);
3092
3093 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
3094 (req->cmd_flags & REQ_META) &&
3095 (rq_data_dir(req) == WRITE) &&
3096 ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
3097 card->ext_csd.data_tag_unit_size);
3098 if (do_data_tag)
3099 cmdq_rq->cmdq_req_flags |= DAT_TAG;
3100 cmdq_rq->data.sg = mqrq->sg;
3101 cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
3102
3103 /*
3104 * Adjust the sg list so it is the same size as the
3105 * request.
3106 */
3107 if (cmdq_rq->data.blocks > card->host->max_blk_count)
3108 cmdq_rq->data.blocks = card->host->max_blk_count;
3109
3110 if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
3111 int i, data_size = cmdq_rq->data.blocks << 9;
3112 struct scatterlist *sg;
3113
3114 for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
3115 data_size -= sg->length;
3116 if (data_size <= 0) {
3117 sg->length += data_size;
3118 i++;
3119 break;
3120 }
3121 }
3122 cmdq_rq->data.sg_len = i;
3123 }
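	/*
	 * Worked example of the trim above (hypothetical numbers): with the
	 * transfer capped at 24 blocks (12288 bytes) and three 8192-byte sg
	 * segments, the loop leaves the first segment untouched, shrinks the
	 * second to 4096 bytes and reduces sg_len from 3 to 2.
	 */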
3124
3125 mqrq->cmdq_req.cmd_flags = req->cmd_flags;
3126 mqrq->cmdq_req.mrq.req = mqrq->req;
3127 mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
3128 mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
3129 mqrq->req->special = mqrq;
3130
3131 pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
3132 mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
3133 mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
3134 cmdq_rq, cmdq_rq->blk_addr,
3135 (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
3136
3137 return &mqrq->cmdq_req;
3138}
3139
3140static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
3141{
3142 struct mmc_queue_req *active_mqrq;
3143 struct mmc_card *card = mq->card;
3144 struct mmc_host *host = card->host;
3145 struct mmc_cmdq_req *mc_rq;
3146 int ret = 0;
3147
3148 BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
3149 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
3150
3151 active_mqrq = &mq->mqrq_cmdq[req->tag];
3152 active_mqrq->req = req;
3153
3154 mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
3155
3156 ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
3157 return ret;
3158}
3159
Asutosh Das5238e022015-04-23 16:00:45 +05303160/*
3161 * Issues a flush (dcmd) request
3162 */
3163int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
3164{
3165 int err;
3166 struct mmc_queue_req *active_mqrq;
3167 struct mmc_card *card = mq->card;
3168 struct mmc_host *host;
3169 struct mmc_cmdq_req *cmdq_req;
3170 struct mmc_cmdq_context_info *ctx_info;
3171
3172 BUG_ON(!card);
3173 host = card->host;
3174 BUG_ON(!host);
3175 BUG_ON(req->tag > card->ext_csd.cmdq_depth);
3176 BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
3177
3178 ctx_info = &host->cmdq_ctx;
3179
3180 set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
3181
3182 active_mqrq = &mq->mqrq_cmdq[req->tag];
3183 active_mqrq->req = req;
3184
3185 cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
3186 cmdq_req->cmdq_req_flags |= QBR;
3187 cmdq_req->mrq.cmd = &cmdq_req->cmd;
3188 cmdq_req->tag = req->tag;
3189
3190 err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
3191 if (err) {
3192 pr_err("%s: failed (%d) preparing flush req\n",
3193 mmc_hostname(host), err);
3194 return err;
3195 }
3196 err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
3197 return err;
3198}
3199EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
3200
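/*
 * Recovery path: halt the command queue engine (if not already halted),
 * optionally discard the whole queue, reset the card via mmc_hw_reset()
 * and then let the host driver reset its own CQ state through
 * cmdq_ops->reset() before clearing the HALT bit.
 */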
Asutosh Das02e30862015-05-20 16:52:04 +05303201static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
3202{
3203 if (!host->cmdq_ops->reset)
3204 return;
3205
3206 if (!test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state)) {
3207 if (mmc_cmdq_halt(host, true)) {
3208 pr_err("%s: halt failed\n", mmc_hostname(host));
3209 goto reset;
3210 }
3211 }
3212
3213 if (clear_all)
3214 mmc_cmdq_discard_queue(host, 0);
3215reset:
3216 mmc_hw_reset(host);
Ritesh Harjanib431b3f2015-05-19 14:27:34 +05303217 mmc_host_clk_hold(host);
Asutosh Das02e30862015-05-20 16:52:04 +05303218 host->cmdq_ops->reset(host, true);
Ritesh Harjanib431b3f2015-05-19 14:27:34 +05303219 mmc_host_clk_release(host);
Asutosh Das02e30862015-05-20 16:52:04 +05303220 clear_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
3221}
3222
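/*
 * Shutdown: halt the queue engine first, then switch the card out of
 * command queue mode (EXT_CSD_CMDQ = 0) so it powers down in legacy
 * mode.  Failures are only logged; there is no recovery at this point.
 */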
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303223static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
3224{
3225 int err;
3226 struct mmc_card *card = mq->card;
3227 struct mmc_host *host = card->host;
3228
3229 err = mmc_cmdq_halt(host, true);
3230 if (err) {
3231 pr_err("%s: halt: failed: %d\n", __func__, err);
3232 return;
3233 }
3234
3235 mmc_claim_host(card->host);
3236 /* disable CQ mode in card */
3237 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3238 EXT_CSD_CMDQ, 0,
3239 card->ext_csd.generic_cmd6_time);
3240 if (err) {
3241 pr_err("%s: failed to switch card to legacy mode: %d\n",
3242 __func__, err);
3243 goto out;
3244 } else {
3245 host->card->cmdq_init = false;
3246 }
3247out:
3248 mmc_release_host(card->host);
3249}
3250
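/*
 * Block-layer timeout hook: mark the command or data with -ETIMEDOUT,
 * remember the request in host->err_mrq and run the normal completion
 * callback so the error worker gets scheduled.  Returning
 * BLK_EH_NOT_HANDLED tells the block layer that the driver will complete
 * the request itself.
 */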
Asutosh Dasfa8836b2015-03-02 23:14:05 +05303251static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
3252{
3253 struct mmc_queue *mq = req->q->queuedata;
3254 struct mmc_host *host = mq->card->host;
3255 struct mmc_queue_req *mq_rq = req->special;
3256 struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
3257 struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
3258
3259 host->cmdq_ops->dumpstate(host);
3260 if (cmdq_req->cmdq_req_flags & DCMD)
3261 mrq->cmd->error = -ETIMEDOUT;
3262 else
3263 mrq->data->error = -ETIMEDOUT;
3264
3265 host->err_mrq = mrq;
3266 mrq->done(mrq);
3267
3268 return BLK_EH_NOT_HANDLED;
3269}
3270
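/*
 * Error worker (scheduled from the completion path): halt the queue and
 * then decide how bad things are.  A response (RED) error is fatal and
 * forces a reset; a data error ends the request, reads the card status
 * and, if the card is stuck in the data/receive state, sends a stop
 * before discarding the queued task.  Plain DCMD errors just end the
 * request.  The reset path invalidates all block-layer tags before
 * resetting the controller; otherwise the queue is unhalted and
 * restarted if it was starved.
 */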
Asutosh Das02e30862015-05-20 16:52:04 +05303271static void mmc_blk_cmdq_err(struct mmc_queue *mq)
3272{
3273 int err;
3274 int retry = 0;
3275 int gen_err;
3276 u32 status;
3277
3278 struct mmc_host *host = mq->card->host;
3279 struct mmc_request *mrq = host->err_mrq;
3280 struct mmc_card *card = mq->card;
3281 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
3282
3283 err = mmc_cmdq_halt(host, true);
3284 if (err) {
3285 pr_err("halt: failed: %d\n", err);
3286 goto reset;
3287 }
3288
3289 /* RED error - Fatal: requires reset */
3290 if (mrq->cmdq_req->resp_err) {
3291 pr_crit("%s: Response error detected: Device in bad state\n",
3292 mmc_hostname(host));
3293 blk_end_request_all(mrq->req, -EIO);
3294 goto reset;
3295 }
3296
3297 if (mrq->data->error) {
3298 blk_end_request_all(mrq->req, mrq->data->error);
3299 for (; retry < MAX_RETRIES; retry++) {
3300 err = get_card_status(card, &status, 0);
3301 if (!err)
3302 break;
3303 }
3304
3305 if (err) {
3306 pr_err("%s: No response from card !!!\n",
3307 mmc_hostname(host));
3308 goto reset;
3309 }
3310
3311 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
3312 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
3313 err = send_stop(card, MMC_CMDQ_STOP_TIMEOUT_MS,
3314 mrq->req, &gen_err, &status);
3315 if (err) {
3316 pr_err("%s: error %d sending stop (%d) command\n",
3317 mrq->req->rq_disk->disk_name,
3318 err, status);
3319 goto reset;
3320 }
3321 }
3322
3323 if (mmc_cmdq_discard_queue(host, mrq->req->tag))
3324 goto reset;
3325 else
3326 goto unhalt;
3327 }
3328
3329 /* DCMD commands */
3330 if (mrq->cmd->error)
3331 blk_end_request_all(mrq->req, mrq->cmd->error);
3332
3333reset:
3334 spin_lock_irq(mq->queue->queue_lock);
3335 blk_queue_invalidate_tags(mrq->req->q);
3336 spin_unlock_irq(mq->queue->queue_lock);
3337 mmc_blk_cmdq_reset(host, true);
3338 goto out;
3339
3340unhalt:
3341 mmc_cmdq_halt(host, false);
3342
3343out:
3344 if (test_and_clear_bit(0, &ctx_info->req_starved))
3345 blk_run_queue(mrq->req->q);
3346}
3347
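/*
 * Completion bookkeeping for cmdq requests: clear the task's bit in
 * active_reqs, run mmc_cmdq_post_req() and, on error, either end the
 * request immediately (if the queue is already in the error state) or
 * set CMDQ_STATE_ERR and schedule the error worker.  DCMDs complete the
 * whole request, data transfers complete bytes_xfered, and a starved
 * queue is restarted.  The last completion also wakes a pending cmdq
 * shutdown, if any.
 */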
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003348/* invoked by block layer in softirq context */
3349void mmc_blk_cmdq_complete_rq(struct request *rq)
3350{
3351 struct mmc_queue_req *mq_rq = rq->special;
3352 struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
3353 struct mmc_host *host = mrq->host;
3354 struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
3355 struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
3356 struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
3357 int err = 0;
3358
3359 if (mrq->cmd && mrq->cmd->error)
3360 err = mrq->cmd->error;
3361 else if (mrq->data && mrq->data->error)
3362 err = mrq->data->error;
3363
Asutosh Das02e30862015-05-20 16:52:04 +05303364 /* clear pending request */
3365 BUG_ON(!test_and_clear_bit(cmdq_req->tag,
3366 &ctx_info->active_reqs));
3367
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003368 mmc_cmdq_post_req(host, mrq, err);
3369 if (err) {
3370 pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
3371 __func__, err);
Asutosh Das02e30862015-05-20 16:52:04 +05303372 if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
3373 pr_err("%s: CQ in error state, ending current req: %d\n",
3374 __func__, err);
3375 blk_end_request_all(rq, err);
3376 } else {
3377 set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
3378 schedule_work(&mq->cmdq_err_work);
3379 }
3380 goto out;
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003381 }
3382
Asutosh Das5238e022015-04-23 16:00:45 +05303383 if (cmdq_req->cmdq_req_flags & DCMD) {
3384 clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
Asutosh Das02e30862015-05-20 16:52:04 +05303385 blk_end_request_all(rq, err);
Asutosh Das5238e022015-04-23 16:00:45 +05303386 goto out;
3387 }
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003388
3389 blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
3390
Asutosh Das5238e022015-04-23 16:00:45 +05303391out:
Asutosh Das02e30862015-05-20 16:52:04 +05303392 if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state) &&
3393 test_and_clear_bit(0, &ctx_info->req_starved))
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003394 blk_run_queue(mq->queue);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003395 mmc_release_host(host);
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303396
3397 if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
3398 complete(&mq->cmdq_shutdown_complete);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003399 return;
3400}
3401
3402/*
3403 * Hand off request completion to the block layer softirq.
3404 * Invoked in irq context.
3405 */
3406void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
3407{
3408 struct request *req = mrq->req;
3409
Ritesh Harjanib431b3f2015-05-19 14:27:34 +05303410 mmc_host_clk_release(mrq->host);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003411 blk_complete_request(req);
3412}
3413EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
3414
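/*
 * Legacy (non-cmdq) read/write path.  Requests are double-buffered: the
 * new request is prepared while the previous one is in flight, and
 * mmc_start_req() hands back whichever asynchronous request finished.
 * The status of the finished request then drives the retry logic below
 * (single-block fallback on ECC errors, resets on command errors, etc.).
 */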
Per Forlinee8a43a2011-07-01 18:55:33 +02003415static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02003416{
3417 struct mmc_blk_data *md = mq->data;
3418 struct mmc_card *card = md->queue.card;
3419 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunterb8360a42015-05-07 13:10:24 +03003420 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02003421 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02003422 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303423 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02003424 struct mmc_async_req *areq;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003425 const u8 packed_nr = 2;
3426 u8 reqs = 0;
Mark Salyzyn6904e432016-01-28 11:12:25 -08003427#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3428 unsigned long waitfor = jiffies;
3429#endif
Per Forlinee8a43a2011-07-01 18:55:33 +02003430
3431 if (!rqc && !mq->mqrq_prev->req)
3432 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02003433
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003434 if (rqc)
3435 reqs = mmc_blk_prep_packed_list(mq, rqc);
3436
Per Forlin54d49d72011-07-01 18:55:29 +02003437 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02003438 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303439 /*
3440			 * When the 4KB native sector size is enabled, reads
3441			 * and writes must be a multiple of 8 blocks
3442 */
Yuan, Juntaoe87c8562016-05-13 07:59:24 +00003443 if (mmc_large_sector(card) &&
3444 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303445 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
3446 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003447 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303448 goto cmd_abort;
3449 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003450
3451 if (reqs >= packed_nr)
3452 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
3453 card, mq);
3454 else
3455 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02003456 areq = &mq->mqrq_cur->mmc_active;
3457 } else
3458 areq = NULL;
3459 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003460 if (!areq) {
3461 if (status == MMC_BLK_NEW_REQUEST)
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003462 set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Per Forlinee8a43a2011-07-01 18:55:33 +02003463 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003464 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02003465
Per Forlinee8a43a2011-07-01 18:55:33 +02003466 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
3467 brq = &mq_rq->brq;
3468 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03003469 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02003470 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02003471
Per Forlind78d4a82011-07-01 18:55:30 +02003472 switch (status) {
3473 case MMC_BLK_SUCCESS:
3474 case MMC_BLK_PARTIAL:
3475 /*
3476 * A block was successfully transferred.
3477 */
Adrian Hunter67716322011-08-29 16:42:15 +03003478 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003479
Mark Salyzyn6904e432016-01-28 11:12:25 -08003480 mmc_blk_simulate_delay(mq, rqc, waitfor);
3481
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003482 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3483 ret = mmc_blk_end_packed_req(mq_rq);
3484 break;
3485 } else {
3486 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02003487 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003488 }
3489
Adrian Hunter67716322011-08-29 16:42:15 +03003490 /*
3491 * If the blk_end_request function returns non-zero even
3492 * though all data has been transferred and no errors
3493 * were returned by the host controller, it's a bug.
3494 */
Per Forlinee8a43a2011-07-01 18:55:33 +02003495 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05303496 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02003497 __func__, blk_rq_bytes(req),
3498 brq->data.bytes_xfered);
3499 rqc = NULL;
3500 goto cmd_abort;
3501 }
Per Forlind78d4a82011-07-01 18:55:30 +02003502 break;
3503 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03003504 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
Ding Wang29535f72015-05-18 20:14:15 +08003505 if (mmc_blk_reset(md, card->host, type))
3506 goto cmd_abort;
3507 if (!ret)
3508 goto start_new_req;
3509 break;
Per Forlind78d4a82011-07-01 18:55:30 +02003510 case MMC_BLK_RETRY:
Adrian Hunterb8360a42015-05-07 13:10:24 +03003511 retune_retry_done = brq->retune_retry_done;
Maya Erezf93ca0a2014-12-09 23:34:41 +02003512 if (retry++ < MMC_BLK_MAX_RETRIES)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003513 break;
Adrian Hunter67716322011-08-29 16:42:15 +03003514 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02003515 case MMC_BLK_ABORT:
Maya Erezf93ca0a2014-12-09 23:34:41 +02003516 if (!mmc_blk_reset(md, card->host, type) &&
3517 (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
Adrian Hunter67716322011-08-29 16:42:15 +03003518 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003519 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003520 case MMC_BLK_DATA_ERR: {
3521 int err;
3522
3523 err = mmc_blk_reset(md, card->host, type);
3524 if (!err)
3525 break;
Sahitya Tummalad0a19842014-10-31 09:46:20 +05303526 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003527 }
3528 case MMC_BLK_ECC_ERR:
3529 if (brq->data.blocks > 1) {
3530 /* Redo read one sector at a time */
Joe Perches66061102014-09-12 14:56:56 -07003531 pr_warn("%s: retrying using single block read\n",
3532 req->rq_disk->disk_name);
Adrian Hunter67716322011-08-29 16:42:15 +03003533 disable_multi = 1;
3534 break;
3535 }
Per Forlind78d4a82011-07-01 18:55:30 +02003536 /*
3537 * After an error, we redo I/O one sector at a
3538 * time, so we only reach here after trying to
3539 * read a single sector.
3540 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303541 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02003542 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02003543 if (!ret)
3544 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02003545 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05303546 case MMC_BLK_NOMEDIUM:
3547 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003548 default:
3549 pr_err("%s: Unhandled return value (%d)",
3550 req->rq_disk->disk_name, status);
3551 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003552 }
3553
Per Forlinee8a43a2011-07-01 18:55:33 +02003554 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003555 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3556 if (!mq_rq->packed->retries)
3557 goto cmd_abort;
3558 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
3559 mmc_start_req(card->host,
3560 &mq_rq->mmc_active, NULL);
3561 } else {
3562
3563 /*
3564				 * In case of an incomplete request
3565 * prepare it again and resend.
3566 */
3567 mmc_blk_rw_rq_prep(mq_rq, card,
3568 disable_multi, mq);
3569 mmc_start_req(card->host,
3570 &mq_rq->mmc_active, NULL);
3571 }
Adrian Hunterb8360a42015-05-07 13:10:24 +03003572 mq_rq->brq.retune_retry_done = retune_retry_done;
Per Forlinee8a43a2011-07-01 18:55:33 +02003573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 } while (ret);
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 return 1;
3577
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003578 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003579 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3580 mmc_blk_abort_packed_req(mq_rq);
3581 } else {
3582 if (mmc_card_removed(card))
3583 req->cmd_flags |= REQ_QUIET;
3584 while (ret)
3585 ret = blk_end_request(req, -EIO,
3586 blk_rq_cur_bytes(req));
3587 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588
Per Forlinee8a43a2011-07-01 18:55:33 +02003589 start_new_req:
3590 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09003591 if (mmc_card_removed(card)) {
3592 rqc->cmd_flags |= REQ_QUIET;
3593 blk_end_request_all(rqc, -EIO);
3594 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003595 /*
3596			 * If the current request is packed, it needs to be put back.
3597 */
3598 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
3599 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
3600
Seungwon Jeon7a819022013-01-22 19:48:07 +09003601 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
3602 mmc_start_req(card->host,
3603 &mq->mqrq_cur->mmc_active, NULL);
3604 }
Per Forlinee8a43a2011-07-01 18:55:33 +02003605 }
3606
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 return 0;
3608}
3609
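/*
 * Partition switch with command queuing enabled: the queue engine is
 * halted, CQ mode is switched off in the card, PART_CONFIG is updated
 * and CQ mode is re-enabled before unhalting.  The WARN_ONs reflect the
 * assumption that these CMD6 switches do not fail in practice.
 */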
Asutosh Das8b594832015-04-23 09:55:43 +05303610static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
3611 struct mmc_blk_data *md)
3612{
3613 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
3614 struct mmc_host *host = card->host;
3615 struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
3616 u8 part_config = card->ext_csd.part_config;
3617
3618 if ((main_md->part_curr == md->part_type) &&
3619 (card->part_curr == md->part_type))
3620 return 0;
3621
3622 WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
3623 card->ext_csd.cmdq_support &&
3624 (md->flags & MMC_BLK_CMD_QUEUE)));
3625
3626 if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
3627 WARN_ON(mmc_cmdq_halt(host, true));
3628
3629 /* disable CQ mode in card */
3630 if (mmc_card_cmdq(card)) {
3631 WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3632 EXT_CSD_CMDQ, 0,
3633 card->ext_csd.generic_cmd6_time));
3634 mmc_card_clr_cmdq(card);
3635 }
3636
3637 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
3638 part_config |= md->part_type;
3639
3640 WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
3641 EXT_CSD_PART_CONFIG, part_config,
3642 card->ext_csd.part_time));
3643
3644 card->ext_csd.part_config = part_config;
3645 card->part_curr = md->part_type;
3646
3647 main_md->part_curr = md->part_type;
3648
3649 WARN_ON(mmc_blk_cmdq_switch(card, md, true));
3650 WARN_ON(mmc_cmdq_halt(host, false));
3651
3652 return 0;
3653}
3654
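/*
 * Top-level cmdq issue hook called from the queue thread: claim the
 * host, make sure the right partition is selected and then dispatch
 * discard, secure erase, flush or regular read/write requests to their
 * cmdq-specific handlers.
 */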
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003655static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
3656{
3657 int ret;
3658 struct mmc_blk_data *md = mq->data;
3659 struct mmc_card *card = md->queue.card;
3660
3661 mmc_claim_host(card->host);
Asutosh Das8b594832015-04-23 09:55:43 +05303662 ret = mmc_blk_cmdq_part_switch(card, md);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003663 if (ret) {
3664 pr_err("%s: %s: partition switch failed %d\n",
3665 md->disk->disk_name, __func__, ret);
Asutosh Das5238e022015-04-23 16:00:45 +05303666 if (req)
3667 blk_end_request_all(req, ret);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003668 mmc_release_host(card->host);
3669 goto switch_failure;
3670 }
3671
Asutosh Das5238e022015-04-23 16:00:45 +05303672 if (req) {
Sahitya Tummala9433a132015-06-09 09:38:36 +05303673 if (req_op(req) == REQ_OP_DISCARD) {
3674 ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
3675 } else if (req_op(req) == REQ_OP_SECURE_ERASE) {
3676 if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3677 ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
3678 else
3679 ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
3680 } else if (req_op(req) == REQ_OP_FLUSH) {
Asutosh Das5238e022015-04-23 16:00:45 +05303681 ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
Sahitya Tummala9433a132015-06-09 09:38:36 +05303682 } else {
Asutosh Das5238e022015-04-23 16:00:45 +05303683 ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
Sahitya Tummala9433a132015-06-09 09:38:36 +05303684 }
Asutosh Das5238e022015-04-23 16:00:45 +05303685 }
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003686
3687switch_failure:
3688 return ret;
3689}
3690
Linus Walleij29eb7bd2016-09-20 11:34:38 +02003691int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
Adrian Hunterbd788c92010-08-11 14:17:47 -07003692{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003693 int ret;
3694 struct mmc_blk_data *md = mq->data;
3695 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003696 struct mmc_host *host = card->host;
3697 unsigned long flags;
Sahitya Tummala61868a42015-05-28 16:54:19 +05303698 unsigned int cmd_flags = req ? req->cmd_flags : 0;
Adrian Hunter869c5542016-08-25 14:11:43 -06003699 bool req_is_special = mmc_req_is_special(req);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003700
Per Forlinee8a43a2011-07-01 18:55:33 +02003701 if (req && !mq->mqrq_prev->req)
3702 /* claim host only for the first request */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003703 mmc_get_card(card);
Per Forlinee8a43a2011-07-01 18:55:33 +02003704
Andrei Warkentin371a6892011-04-11 18:10:25 -05003705 ret = mmc_blk_part_switch(card, md);
3706 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003707 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303708 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003709 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003710 ret = 0;
3711 goto out;
3712 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003713
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003714 mmc_blk_write_packing_control(mq, req);
3715
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003716 clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Mike Christiec2df40d2016-06-05 14:32:17 -05003717 if (req && req_op(req) == REQ_OP_DISCARD) {
Per Forlinee8a43a2011-07-01 18:55:33 +02003718 /* complete ongoing async transfer before issuing discard */
3719 if (card->host->areq)
3720 mmc_blk_issue_rw_rq(mq, NULL);
Christoph Hellwig288dab82016-06-09 16:00:36 +02003721 ret = mmc_blk_issue_discard_rq(mq, req);
3722 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
3723		/* complete ongoing async transfer before issuing secure erase */
3724 if (card->host->areq)
3725 mmc_blk_issue_rw_rq(mq, NULL);
Maya Erez0c0609f2014-12-09 23:31:55 +02003726 if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3727 ret = mmc_blk_issue_secdiscard_rq(mq, req);
3728 else
3729 ret = mmc_blk_issue_discard_rq(mq, req);
Sahitya Tummala61868a42015-05-28 16:54:19 +05303730 } else if ((req && req_op(req) == REQ_OP_FLUSH) ||
3731 (cmd_flags & REQ_BARRIER)) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09003732 /* complete ongoing async transfer before issuing flush */
3733 if (card->host->areq)
3734 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003735 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003736 } else {
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003737 if (!req && host->areq) {
3738 spin_lock_irqsave(&host->context_info.lock, flags);
3739 host->context_info.is_waiting_last_req = true;
3740 spin_unlock_irqrestore(&host->context_info.lock, flags);
3741 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003742 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003743 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003744
Andrei Warkentin371a6892011-04-11 18:10:25 -05003745out:
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003746 if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
3747 req_is_special)
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09003748 /*
3749 * Release host when there are no more requests
3750		 * and after a special request (discard, flush) is done.
3751		 * In case of a special request, there is no reentry to
3752 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
3753 */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003754 mmc_put_card(card);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003755 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07003756}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757
Russell Kinga6f6c962006-01-03 22:38:44 +00003758static inline int mmc_blk_readonly(struct mmc_card *card)
3759{
3760 return mmc_card_readonly(card) ||
3761 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
3762}
3763
Andrei Warkentin371a6892011-04-11 18:10:25 -05003764static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3765 struct device *parent,
3766 sector_t size,
3767 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003768 const char *subname,
3769 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770{
3771 struct mmc_blk_data *md;
3772 int devidx, ret;
3773
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003774again:
3775 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
3776 return ERR_PTR(-ENOMEM);
3777
3778 spin_lock(&mmc_blk_lock);
3779 ret = ida_get_new(&mmc_blk_ida, &devidx);
3780 spin_unlock(&mmc_blk_lock);
3781
3782 if (ret == -EAGAIN)
3783 goto again;
3784 else if (ret)
3785 return ERR_PTR(ret);
3786
3787 if (devidx >= max_devices) {
3788 ret = -ENOSPC;
3789 goto out;
3790 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07003792 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00003793 if (!md) {
3794 ret = -ENOMEM;
3795 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796 }
Russell Kinga6f6c962006-01-03 22:38:44 +00003797
Johan Rudholmadd710e2011-12-02 08:51:06 +01003798 md->area_type = area_type;
3799
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003800 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00003801 * Set the read-only status based on the supported commands
3802 * and the write protect switch.
3803 */
3804 md->read_only = mmc_blk_readonly(card);
3805
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003806 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00003807 if (md->disk == NULL) {
3808 ret = -ENOMEM;
3809 goto err_kfree;
3810 }
3811
3812 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003813 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00003814 md->usage = 1;
3815
Asutosh Das963469b2015-05-21 13:29:51 +05303816 ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
Russell Kinga6f6c962006-01-03 22:38:44 +00003817 if (ret)
3818 goto err_putdisk;
3819
Russell Kinga6f6c962006-01-03 22:38:44 +00003820 md->queue.data = md;
3821
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003822 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003823 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00003824 md->disk->fops = &mmc_bdops;
3825 md->disk->private_data = md;
3826 md->disk->queue = md->queue.queue;
Dan Williams307d8e62016-06-20 10:40:44 -07003827 md->parent = parent;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003828 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Cross382c55f2015-10-22 10:00:41 -07003829 md->disk->flags = GENHD_FL_EXT_DEVT;
Ulf Hanssonf5b4d712014-09-03 11:02:23 +02003830 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
Loic Pallardy53d8f972012-08-06 17:12:28 +02003831 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00003832
3833 /*
3834 * As discussed on lkml, GENHD_FL_REMOVABLE should:
3835 *
3836 * - be set for removable media with permanent block devices
3837 * - be unset for removable block devices with permanent media
3838 *
3839 * Since MMC block devices clearly fall under the second
3840 * case, we do not set GENHD_FL_REMOVABLE. Userspace
3841 * should use the block device creation/destruction hotplug
3842 * messages to tell when the card is present.
3843 */
3844
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003845 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
Ulf Hansson9aaf3432016-04-06 16:12:08 +02003846 "mmcblk%u%s", card->host->index, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00003847
Saugata Dasa5075eb2012-05-17 16:32:21 +05303848 if (mmc_card_mmc(card))
3849 blk_queue_logical_block_size(md->queue.queue,
3850 card->ext_csd.data_sector_size);
3851 else
3852 blk_queue_logical_block_size(md->queue.queue, 512);
3853
Andrei Warkentin371a6892011-04-11 18:10:25 -05003854 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003855
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003856 if (mmc_host_cmd23(card->host)) {
Daniel Glöckner0ed50ab2016-08-30 14:17:30 +02003857 if ((mmc_card_mmc(card) &&
3858 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003859 (mmc_card_sd(card) &&
3860 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
3861 md->flags |= MMC_BLK_CMD23;
3862 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003863
3864 if (mmc_card_mmc(card) &&
3865 md->flags & MMC_BLK_CMD23 &&
3866 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
Asutosh Das5238e022015-04-23 16:00:45 +05303867 card->ext_csd.rel_sectors)) {
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003868 md->flags |= MMC_BLK_REL_WR;
Jens Axboee9d5c742016-03-30 10:17:20 -06003869 blk_queue_write_cache(md->queue.queue, true, true);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003870 }
3871
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003872 if (card->cmdq_init) {
3873 md->flags |= MMC_BLK_CMD_QUEUE;
3874 md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
3875 md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
Asutosh Das02e30862015-05-20 16:52:04 +05303876 md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
Asutosh Dasfa8836b2015-03-02 23:14:05 +05303877 md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
Asutosh Dasa0ba4922015-04-23 16:01:57 +05303878 md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003879 }
3880
3881 if (mmc_card_mmc(card) && !card->cmdq_init &&
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003882 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
3883 (md->flags & MMC_BLK_CMD23) &&
3884 card->ext_csd.packed_event_en) {
3885 if (!mmc_packed_init(&md->queue, card))
3886 md->flags |= MMC_BLK_PACKED_CMD;
3887 }
3888
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00003890
3891 err_putdisk:
3892 put_disk(md->disk);
3893 err_kfree:
3894 kfree(md);
3895 out:
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003896 spin_lock(&mmc_blk_lock);
3897 ida_remove(&mmc_blk_ida, devidx);
3898 spin_unlock(&mmc_blk_lock);
Russell Kinga6f6c962006-01-03 22:38:44 +00003899 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900}
3901
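/*
 * Capacity is reported in 512-byte sectors.  Block-addressed cards use
 * ext_csd.sectors directly; byte-addressed cards convert the CSD
 * capacity, e.g. (hypothetical numbers) 61440 blocks of 2048 bytes
 * (read_blkbits = 11) become 61440 << (11 - 9) = 245760 sectors,
 * i.e. 120 MiB.
 */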
Andrei Warkentin371a6892011-04-11 18:10:25 -05003902static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
3903{
3904 sector_t size;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003905
3906 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
3907 /*
3908		 * The EXT_CSD sector count is in number of 512 byte
3909 * sectors.
3910 */
3911 size = card->ext_csd.sectors;
3912 } else {
3913 /*
3914 * The CSD capacity field is in units of read_blkbits.
3915 * set_capacity takes units of 512 bytes.
3916 */
Kuninori Morimoto087de9e2015-05-11 07:35:28 +00003917 size = (typeof(sector_t))card->csd.capacity
3918 << (card->csd.read_blkbits - 9);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003919 }
3920
Tobias Klauser7a30f2a2015-01-21 15:56:44 +01003921 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003922 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003923}
3924
3925static int mmc_blk_alloc_part(struct mmc_card *card,
3926 struct mmc_blk_data *md,
3927 unsigned int part_type,
3928 sector_t size,
3929 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003930 const char *subname,
3931 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05003932{
3933 char cap_str[10];
3934 struct mmc_blk_data *part_md;
3935
3936 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003937 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003938 if (IS_ERR(part_md))
3939 return PTR_ERR(part_md);
3940 part_md->part_type = part_type;
3941 list_add(&part_md->part, &md->part);
3942
James Bottomleyb9f28d82015-03-05 18:47:01 -08003943 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
Andrei Warkentin371a6892011-04-11 18:10:25 -05003944 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303945 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05003946 part_md->disk->disk_name, mmc_card_id(card),
3947 mmc_card_name(card), part_md->part_type, cap_str);
3948 return 0;
3949}
3950
Namjae Jeone0c368d2011-10-06 23:41:38 +09003951/* MMC Physical partitions consist of two boot partitions and
3952 * up to four general purpose partitions.
3953 * For each partition enabled in EXT_CSD a block device will be allocated
3954 * to provide access to the partition.
3955 */
3956
Andrei Warkentin371a6892011-04-11 18:10:25 -05003957static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3958{
Namjae Jeone0c368d2011-10-06 23:41:38 +09003959 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003960
3961 if (!mmc_card_mmc(card))
3962 return 0;
3963
Namjae Jeone0c368d2011-10-06 23:41:38 +09003964 for (idx = 0; idx < card->nr_parts; idx++) {
3965 if (card->part[idx].size) {
3966 ret = mmc_blk_alloc_part(card, md,
3967 card->part[idx].part_cfg,
3968 card->part[idx].size >> 9,
3969 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003970 card->part[idx].name,
3971 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09003972 if (ret)
3973 return ret;
3974 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003975 }
3976
3977 return ret;
3978}
3979
Andrei Warkentin371a6892011-04-11 18:10:25 -05003980static void mmc_blk_remove_req(struct mmc_blk_data *md)
3981{
Johan Rudholmadd710e2011-12-02 08:51:06 +01003982 struct mmc_card *card;
3983
Andrei Warkentin371a6892011-04-11 18:10:25 -05003984 if (md) {
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003985 /*
3986 * Flush remaining requests and free queues. It
3987 * is freeing the queue that stops new requests
3988 * from being accepted.
3989 */
Franck Jullien8efb83a2013-07-24 15:17:48 +02003990 card = md->queue.card;
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003991 mmc_cleanup_queue(&md->queue);
3992 if (md->flags & MMC_BLK_PACKED_CMD)
3993 mmc_packed_clean(&md->queue);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003994 if (md->flags & MMC_BLK_CMD_QUEUE)
3995 mmc_cmdq_clean(&md->queue, card);
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003996 device_remove_file(disk_to_dev(md->disk),
3997 &md->num_wr_reqs_to_start_packing);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003998 if (md->disk->flags & GENHD_FL_UP) {
3999 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01004000 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
4001 card->ext_csd.boot_ro_lockable)
4002 device_remove_file(disk_to_dev(md->disk),
4003 &md->power_ro_lock);
Mark Salyzyn6904e432016-01-28 11:12:25 -08004004#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
4005 device_remove_file(disk_to_dev(md->disk),
4006 &dev_attr_max_write_speed);
4007 device_remove_file(disk_to_dev(md->disk),
4008 &dev_attr_max_read_speed);
4009 device_remove_file(disk_to_dev(md->disk),
4010 &dev_attr_cache_size);
4011#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05004012
Andrei Warkentin371a6892011-04-11 18:10:25 -05004013 del_gendisk(md->disk);
4014 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05004015 mmc_blk_put(md);
4016 }
4017}
4018
4019static void mmc_blk_remove_parts(struct mmc_card *card,
4020 struct mmc_blk_data *md)
4021{
4022 struct list_head *pos, *q;
4023 struct mmc_blk_data *part_md;
4024
4025 list_for_each_safe(pos, q, &md->part) {
4026 part_md = list_entry(pos, struct mmc_blk_data, part);
4027 list_del(pos);
4028 mmc_blk_remove_req(part_md);
4029 }
4030}
4031
4032static int mmc_add_disk(struct mmc_blk_data *md)
4033{
4034 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01004035 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05004036
Dan Williams307d8e62016-06-20 10:40:44 -07004037 device_add_disk(md->parent, md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004038 md->force_ro.show = force_ro_show;
4039 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05304040 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004041 md->force_ro.attr.name = "force_ro";
4042 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
4043 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
4044 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01004045 goto force_ro_fail;
Mark Salyzyn6904e432016-01-28 11:12:25 -08004046#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
4047 atomic_set(&md->queue.max_write_speed, max_write_speed);
4048 ret = device_create_file(disk_to_dev(md->disk),
4049 &dev_attr_max_write_speed);
4050 if (ret)
4051 goto max_write_speed_fail;
4052 atomic_set(&md->queue.max_read_speed, max_read_speed);
4053 ret = device_create_file(disk_to_dev(md->disk),
4054 &dev_attr_max_read_speed);
4055 if (ret)
4056 goto max_read_speed_fail;
4057 atomic_set(&md->queue.cache_size, cache_size);
4058 atomic_long_set(&md->queue.cache_used, 0);
4059 md->queue.cache_jiffies = jiffies;
4060 ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
4061 if (ret)
4062 goto cache_size_fail;
4063#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01004064
4065 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
4066 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04004067 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01004068
4069 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
4070 mode = S_IRUGO;
4071 else
4072 mode = S_IRUGO | S_IWUSR;
4073
4074 md->power_ro_lock.show = power_ro_lock_show;
4075 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01004076 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01004077 md->power_ro_lock.attr.mode = mode;
4078 md->power_ro_lock.attr.name =
4079 "ro_lock_until_next_power_on";
4080 ret = device_create_file(disk_to_dev(md->disk),
4081 &md->power_ro_lock);
4082 if (ret)
4083 goto power_ro_lock_fail;
4084 }
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02004085
4086 md->num_wr_reqs_to_start_packing.show =
4087 num_wr_reqs_to_start_packing_show;
4088 md->num_wr_reqs_to_start_packing.store =
4089 num_wr_reqs_to_start_packing_store;
4090 sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
4091 md->num_wr_reqs_to_start_packing.attr.name =
4092 "num_wr_reqs_to_start_packing";
4093 md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
4094 ret = device_create_file(disk_to_dev(md->disk),
4095 &md->num_wr_reqs_to_start_packing);
4096 if (ret)
Maya Erez17022402014-12-04 00:15:42 +02004097 goto num_wr_reqs_to_start_packing_fail;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02004098
Maya Erez5a8dae12014-12-04 15:13:59 +02004099 md->no_pack_for_random.show = no_pack_for_random_show;
4100 md->no_pack_for_random.store = no_pack_for_random_store;
4101 sysfs_attr_init(&md->no_pack_for_random.attr);
4102 md->no_pack_for_random.attr.name = "no_pack_for_random";
4103 md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
4104 ret = device_create_file(disk_to_dev(md->disk),
4105 &md->no_pack_for_random);
4106 if (ret)
4107 goto no_pack_for_random_fails;
4108
Johan Rudholmadd710e2011-12-02 08:51:06 +01004109 return ret;
4110
Maya Erez5a8dae12014-12-04 15:13:59 +02004111no_pack_for_random_fails:
4112 device_remove_file(disk_to_dev(md->disk),
4113 &md->num_wr_reqs_to_start_packing);
Maya Erez17022402014-12-04 00:15:42 +02004114num_wr_reqs_to_start_packing_fail:
4115 device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
Johan Rudholmadd710e2011-12-02 08:51:06 +01004116power_ro_lock_fail:
Mark Salyzyn6904e432016-01-28 11:12:25 -08004117#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
4118 device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
4119cache_size_fail:
4120 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
4121max_read_speed_fail:
4122 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
4123max_write_speed_fail:
4124#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01004125 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
4126force_ro_fail:
4127 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004128
4129 return ret;
4130}
4131
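/*
 * Card quirk table: each MMC_FIXUP() entry matches on the CID product
 * name, manufacturer id and OEM id, and applies the given quirk through
 * the add_quirk/add_quirk_mmc hook when a matching card is probed (see
 * mmc_fixup_device() in the probe path below).
 */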
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004132static const struct mmc_fixup blk_fixups[] =
4133{
Chris Ballc59d4472011-11-11 22:01:43 -05004134 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
4135 MMC_QUIRK_INAND_CMD38),
4136 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
4137 MMC_QUIRK_INAND_CMD38),
4138 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
4139 MMC_QUIRK_INAND_CMD38),
4140 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
4141 MMC_QUIRK_INAND_CMD38),
4142 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
4143 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004144
4145 /*
4146 * Some MMC cards experience performance degradation with CMD23
4147 * instead of CMD12-bounded multiblock transfers. For now we'll
4148 * black list what's bad...
4149 * - Certain Toshiba cards.
4150 *
4151 * N.B. This doesn't affect SD cards.
4152 */
Yangbo Lu7d70d472015-07-10 11:44:03 +08004153 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
4154 MMC_QUIRK_BLK_NO_CMD23),
4155 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
4156 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004157 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004158 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004159 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004160 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05004161 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05004162 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004163
4164 /*
Matt Gumbel32ecd322016-05-20 10:33:46 +03004165 * Some MMC cards need longer data read timeout than indicated in CSD.
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004166 */
Chris Ballc59d4472011-11-11 22:01:43 -05004167 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004168 MMC_QUIRK_LONG_READ_TIME),
Matt Gumbel32ecd322016-05-20 10:33:46 +03004169 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
4170 MMC_QUIRK_LONG_READ_TIME),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01004171
Ian Chen3550ccd2012-08-29 15:05:36 +09004172 /*
Guoping Yu3c984a92014-08-06 12:44:55 +08004173 * Some Samsung MMC cards need longer data read timeout than
4174 * indicated in CSD.
4175 */
4176 MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
4177 MMC_QUIRK_LONG_READ_TIME),
4178
4179 /*
Ian Chen3550ccd2012-08-29 15:05:36 +09004180 * On these Samsung MoviNAND parts, performing secure erase or
4181 * secure trim can result in unrecoverable corruption due to a
4182 * firmware bug.
4183 */
4184 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4185 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4186 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4187 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4188 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4189 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4190 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4191 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4192 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4193 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4194 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4195 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4196 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4197 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4198 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
4199 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
4200
Shawn Linb5b4ff02015-08-12 13:08:32 +08004201 /*
4202	 * On some Kingston eMMCs, performing trim can occasionally result in
4203	 * unrecoverable data corruption due to a firmware bug.
4204 */
4205 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
4206 MMC_QUIRK_TRIM_BROKEN),
4207 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
4208 MMC_QUIRK_TRIM_BROKEN),
4209
Pratibhasagar V8d664e32014-12-03 18:26:42 +02004210 /* Some INAND MCP devices advertise incorrect timeout values */
4211 MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
4212 MMC_QUIRK_INAND_DATA_TIMEOUT),
4213
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004214 END_FIXUP
4215};
4216
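/*
 * Probe: reject cards without block-read support, apply the quirk table,
 * allocate gendisks for the main area and any enabled physical
 * partitions, and enable runtime PM with a 3 second autosuspend delay
 * (except for SD-combo cards, where the SDIO init sequence decides).
 */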
Ulf Hansson96541ba2015-04-14 13:06:12 +02004217static int mmc_blk_probe(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004219 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004220 char cap_str[10];
4221
Pierre Ossman912490d2005-05-21 10:27:02 +01004222 /*
4223 * Check that the card supports the command class(es) we need.
4224 */
4225 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 return -ENODEV;
4227
Lukas Czerner5204d002014-06-18 13:18:07 +02004228 mmc_fixup_device(card, blk_fixups);
4229
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230 md = mmc_blk_alloc(card);
4231 if (IS_ERR(md))
4232 return PTR_ERR(md);
4233
James Bottomleyb9f28d82015-03-05 18:47:01 -08004234 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004235 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05304236 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02004238 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
Andrei Warkentin371a6892011-04-11 18:10:25 -05004240 if (mmc_blk_alloc_parts(card, md))
4241 goto out;
4242
Ulf Hansson96541ba2015-04-14 13:06:12 +02004243 dev_set_drvdata(&card->dev, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04004244
Andrei Warkentin371a6892011-04-11 18:10:25 -05004245 if (mmc_add_disk(md))
4246 goto out;
4247
4248 list_for_each_entry(part_md, &md->part, part) {
4249 if (mmc_add_disk(part_md))
4250 goto out;
4251 }
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004252
4253 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
4254 pm_runtime_use_autosuspend(&card->dev);
4255
4256 /*
4257 * Don't enable runtime PM for SD-combo cards here. Leave that
4258 * decision to be taken during the SDIO init sequence instead.
4259 */
4260 if (card->type != MMC_TYPE_SD_COMBO) {
4261 pm_runtime_set_active(&card->dev);
4262 pm_runtime_enable(&card->dev);
4263 }
4264
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265 return 0;
4266
4267 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05004268 mmc_blk_remove_parts(card, md);
4269 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01004270 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271}
4272
Ulf Hansson96541ba2015-04-14 13:06:12 +02004273static void mmc_blk_remove(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004275 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276
Andrei Warkentin371a6892011-04-11 18:10:25 -05004277 mmc_blk_remove_parts(card, md);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004278 pm_runtime_get_sync(&card->dev);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03004279 mmc_claim_host(card->host);
4280 mmc_blk_part_switch(card, md);
4281 mmc_release_host(card->host);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02004282 if (card->type != MMC_TYPE_SD_COMBO)
4283 pm_runtime_disable(&card->dev);
4284 pm_runtime_put_noidle(&card->dev);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004285 mmc_blk_remove_req(md);
Ulf Hansson96541ba2015-04-14 13:06:12 +02004286 dev_set_drvdata(&card->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287}
4288
Ulf Hansson96541ba2015-04-14 13:06:12 +02004289static int _mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004291 struct mmc_blk_data *part_md;
Ulf Hansson96541ba2015-04-14 13:06:12 +02004292 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304293 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
4295 if (md) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05304296 rc = mmc_queue_suspend(&md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304297 if (rc)
4298 goto out;
Andrei Warkentin371a6892011-04-11 18:10:25 -05004299 list_for_each_entry(part_md, &md->part, part) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05304300 rc = mmc_queue_suspend(&part_md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304301 if (rc)
4302 goto out_resume;
Andrei Warkentin371a6892011-04-11 18:10:25 -05004303 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 }
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05304305 goto out;
4306
4307 out_resume:
4308 mmc_queue_resume(&md->queue);
4309 list_for_each_entry(part_md, &md->part, part) {
4310 mmc_queue_resume(&part_md->queue);
4311 }
4312 out:
4313 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314}
4315
Ulf Hansson96541ba2015-04-14 13:06:12 +02004316static void mmc_blk_shutdown(struct mmc_card *card)
Ulf Hansson76287742013-06-10 17:03:40 +02004317{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004318 _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02004319}
4320
Ulf Hansson0967edc2014-10-06 11:29:42 +02004321#ifdef CONFIG_PM_SLEEP
4322static int mmc_blk_suspend(struct device *dev)
Ulf Hansson76287742013-06-10 17:03:40 +02004323{
Ulf Hansson96541ba2015-04-14 13:06:12 +02004324 struct mmc_card *card = mmc_dev_to_card(dev);
4325
4326 return _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02004327}
4328
Ulf Hansson0967edc2014-10-06 11:29:42 +02004329static int mmc_blk_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330{
Andrei Warkentin371a6892011-04-11 18:10:25 -05004331 struct mmc_blk_data *part_md;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02004332 struct mmc_blk_data *md = dev_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
4334 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05004335 /*
4336 * Resume involves the card going into idle state,
4337 * so current partition is always the main one.
4338 */
4339 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05004341 list_for_each_entry(part_md, &md->part, part) {
4342 mmc_queue_resume(&part_md->queue);
4343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344 }
4345 return 0;
4346}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347#endif
4348
Ulf Hansson0967edc2014-10-06 11:29:42 +02004349static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
4350
Ulf Hansson96541ba2015-04-14 13:06:12 +02004351static struct mmc_driver mmc_driver = {
4352 .drv = {
4353 .name = "mmcblk",
4354 .pm = &mmc_blk_pm_ops,
4355 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 .probe = mmc_blk_probe,
4357 .remove = mmc_blk_remove,
Ulf Hansson76287742013-06-10 17:03:40 +02004358 .shutdown = mmc_blk_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359};
4360
4361static int __init mmc_blk_init(void)
4362{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004363 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364
Olof Johansson5e71b7a2010-09-17 21:19:57 -04004365 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
4366 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
4367
Ben Hutchingsa26eba62014-11-06 03:35:09 +00004368 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
Olof Johansson5e71b7a2010-09-17 21:19:57 -04004369
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02004370 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
4371 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004374 res = mmc_register_driver(&mmc_driver);
4375 if (res)
4376 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09004378 return 0;
4379 out2:
4380 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 out:
4382 return res;
4383}
4384
4385static void __exit mmc_blk_exit(void)
4386{
4387 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02004388 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004389}
4390
4391module_init(mmc_blk_init);
4392module_exit(mmc_blk_exit);
4393
4394MODULE_LICENSE("GPL");
4395MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
4396