/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/ioprio.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
#include "block.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (30 * 1000) /* 30 sec timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)     ((req->cmd_flags & REQ_FUA) && \
                                 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER          0x01
#define PACKED_CMD_WR           0x02
#define PACKED_TRIGGER_MAX_ELEMENTS     5000

#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)                       \
        do {                                                            \
                if (stats->enabled)                                     \
                        stats->pack_stop_reason[reason]++;              \
        } while (0)

#define PCKD_TRGR_INIT_MEAN_POTEN       17
#define PCKD_TRGR_POTEN_LOWER_BOUND     5
#define PCKD_TRGR_URGENT_PENALTY        2
#define PCKD_TRGR_LOWER_BOUND           5
#define PCKD_TRGR_PRECISION_MULTIPLIER  100

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct device   *parent;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */
#define MMC_BLK_CMD_QUEUE       (1 << 3)        /* MMC command queue support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)
#define MMC_BLK_FLUSH           BIT(4)

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with dev_set_drvdata, and keeps
         * track of the current selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        struct device_attribute num_wr_reqs_to_start_packing;
        struct device_attribute no_pack_for_random;
        int     area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
        MMC_PACKED_NR_IDX = -1,
        MMC_PACKED_NR_ZERO,
        MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
static int mmc_blk_cmdq_switch(struct mmc_card *card,
                               struct mmc_blk_data *md, bool enable);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
        struct mmc_packed *packed = mqrq->packed;

        mqrq->cmd_type = MMC_PACKED_NONE;
        packed->nr_entries = MMC_PACKED_NR_ZERO;
        packed->idx_failure = MMC_PACKED_NR_IDX;
        packed->retries = 0;
        packed->blocks = 0;
}

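/*
 * Look up the mmc_blk_data for a disk and take a reference on it.  Returns
 * NULL if the device is already being torn down (usage count has hit zero).
 * The reference is dropped with mmc_blk_put().
 */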
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                spin_lock(&mmc_blk_lock);
                ida_remove(&mmc_blk_ida, devidx);
                spin_unlock(&mmc_blk_lock);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

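/*
 * Sysfs support for the boot-area power-on write protection: the show
 * handler reports 2 (permanently locked), 1 (locked until next power-on)
 * or 0 (unlocked) from the EXT_CSD BOOT_WP bits; the store handler sets
 * the power-on lock and marks the affected boot partitions read-only.
 */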
static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card;
        int locked = 0;

        if (!md)
                return -EINVAL;

        card = md->queue.card;
        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        mmc_blk_put(md);

        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_card *card;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        if (!md)
                return -EINVAL;
        card = md->queue.card;

        mmc_get_card(card);

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                         card->ext_csd.boot_ro_lock |
                         EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                         card->ext_csd.part_time);
        if (ret)
                pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                       md->disk->disk_name, ret);
        else
                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

        mmc_put_card(card);

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n",
                                        part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }

        mmc_blk_put(md);
        return count;
}

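/*
 * Sysfs "force_ro" attribute: report and override the read-only state of
 * the disk, on top of any hardware write protection.
 */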
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        if (!md)
                return -EINVAL;

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);

        if (!md)
                return -EINVAL;

        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static ssize_t
no_pack_for_random_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int ret;

        if (!md)
                return -EINVAL;
        ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);

        mmc_blk_put(md);
        return ret;
}

static ssize_t
no_pack_for_random_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        int value;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card;
        int ret = count;

        if (!md)
                return -EINVAL;

        card = md->queue.card;
        if (!card) {
                ret = -EINVAL;
                goto exit;
        }

        sscanf(buf, "%d", &value);

        if (value < 0) {
                pr_err("%s: value %d is not valid. old value remains = %d",
                       mmc_hostname(card->host), value,
                       md->queue.no_pack_for_random);
                ret = -EINVAL;
                goto exit;
        }

        md->queue.no_pack_for_random = (value > 0) ? true : false;

        pr_debug("%s: no_pack_for_random: new value = %d",
                 mmc_hostname(card->host),
                 md->queue.no_pack_for_random);

exit:
        mmc_blk_put(md);
        return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int num_wr_reqs_to_start_packing;
        int ret;

        if (!md)
                return -EINVAL;
        num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);

        mmc_blk_put(md);
        return ret;
}

static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        int value;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card;
        int ret = count;

        if (!md)
                return -EINVAL;

        card = md->queue.card;
        if (!card) {
                ret = -EINVAL;
                goto exit;
        }

        sscanf(buf, "%d", &value);

        if (value >= 0) {
                md->queue.num_wr_reqs_to_start_packing =
                        min_t(int, value, (int)card->ext_csd.max_packed_writes);

                pr_debug("%s: trigger to pack: new value = %d",
                         mmc_hostname(card->host),
                         md->queue.num_wr_reqs_to_start_packing);
        } else {
                pr_err("%s: value %d is not valid. old value remains = %d",
                       mmc_hostname(card->host), value,
                       md->queue.num_wr_reqs_to_start_packing);
                ret = -EINVAL;
        }

exit:
        mmc_blk_put(md);
        return ret;
}

#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED

static int max_read_speed, max_write_speed, cache_size = 4;

module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
module_param(cache_size, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");

/*
 * helper macros and expectations:
 *      size    - unsigned long number of bytes
 *      jiffies - unsigned long HZ timestamp difference
 *      speed   - unsigned KB/s transfer rate
 */
#define size_and_speed_to_jiffies(size, speed) \
        ((size) * HZ / (speed) / 1024UL)
#define jiffies_and_speed_to_size(jiffies, speed) \
        (((speed) * (jiffies) * 1024UL) / HZ)
#define jiffies_and_size_to_speed(jiffies, size) \
        ((size) * HZ / (jiffies) / 1024UL)

/* Limits to report warning */
/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)

#define speed_valid(speed) ((speed) > 0)

static const char off[] = "off\n";

static int max_speed_show(int speed, char *buf)
{
        if (speed)
                return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
        else
                return scnprintf(buf, PAGE_SIZE, off);
}

static int max_speed_store(const char *buf, struct request_queue *q)
{
        unsigned int limit, set = 0;

        if (!strncasecmp(off, buf, sizeof(off) - 2))
                return set;
        if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
                return -EINVAL;
        if (set == 0)
                return set;
        limit = MAX_SPEED(q);
        if (set > limit)
                pr_warn("max speed %u ineffective above %u\n", set, limit);
        limit = MIN_SPEED(q);
        if (set < limit)
                pr_warn("max speed %u painful below %u\n", set, limit);
        return set;
}

static ssize_t max_write_speed_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);

        mmc_blk_put(md);
        return ret;
}

static ssize_t max_write_speed_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int set = max_speed_store(buf, md->queue.queue);

        if (set < 0) {
                mmc_blk_put(md);
                return set;
        }

        atomic_set(&md->queue.max_write_speed, set);
        mmc_blk_put(md);
        return count;
}

static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
        max_write_speed_show, max_write_speed_store);

static ssize_t max_read_speed_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);

        mmc_blk_put(md);
        return ret;
}

static ssize_t max_read_speed_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        int set = max_speed_store(buf, md->queue.queue);

        if (set < 0) {
                mmc_blk_put(md);
                return set;
        }

        atomic_set(&md->queue.max_read_speed, set);
        mmc_blk_put(md);
        return count;
}

static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
        max_read_speed_show, max_read_speed_store);

static ssize_t cache_size_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_queue *mq = &md->queue;
        int cache_size = atomic_read(&mq->cache_size);
        int ret;

        if (!cache_size)
                ret = scnprintf(buf, PAGE_SIZE, off);
        else {
                int speed = atomic_read(&mq->max_write_speed);

                if (!speed_valid(speed))
                        ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
                else { /* We accept race between cache_jiffies and cache_used */
                        unsigned long size = jiffies_and_speed_to_size(
                                jiffies - mq->cache_jiffies, speed);
                        long used = atomic_long_read(&mq->cache_used);

                        if (size >= used)
                                size = 0;
                        else
                                size = (used - size) * 100 / cache_size
                                        / 1024UL / 1024UL;

                        ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
                                        cache_size, size);
                }
        }

        mmc_blk_put(md);
        return ret;
}

static ssize_t cache_size_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct mmc_blk_data *md;
        unsigned int set = 0;

        if (strncasecmp(off, buf, sizeof(off) - 2)
            && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
                return -EINVAL;

        md = mmc_blk_get(dev_to_disk(dev));
        atomic_set(&md->queue.cache_size, set);
        mmc_blk_put(md);
        return count;
}

static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
        cache_size_show, cache_size_store);

/* correct for write-back */
static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
{
        long used = 0;
        int speed = atomic_read(&mq->max_write_speed);

        if (speed_valid(speed)) {
                unsigned long size = jiffies_and_speed_to_size(
                        waitfor - mq->cache_jiffies, speed);
                used = atomic_long_read(&mq->cache_used);

                if (size >= used)
                        used = 0;
                else
                        used -= size;
        }

        atomic_long_set(&mq->cache_used, used);
        mq->cache_jiffies = waitfor;

        return used;
}

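/*
 * Throttle the request to the configured maximum read/write speed by
 * sleeping off the time the transfer would still need at that rate.
 * Writes are first charged against the simulated write-back cache.
 */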
static void mmc_blk_simulate_delay(
        struct mmc_queue *mq,
        struct request *req,
        unsigned long waitfor)
{
        int max_speed;

        if (!req)
                return;

        max_speed = (rq_data_dir(req) == READ)
                ? atomic_read(&mq->max_read_speed)
                : atomic_read(&mq->max_write_speed);
        if (speed_valid(max_speed)) {
                unsigned long bytes = blk_rq_bytes(req);

                if (rq_data_dir(req) != READ) {
                        int cache_size = atomic_read(&mq->cache_size);

                        if (cache_size) {
                                unsigned long size = cache_size * 1024L * 1024L;
                                long used = mmc_blk_cache_used(mq, waitfor);

                                used += bytes;
                                atomic_long_set(&mq->cache_used, used);
                                bytes = 0;
                                if (used > size)
                                        bytes = used - size;
                        }
                }
                waitfor += size_and_speed_to_jiffies(bytes, max_speed);
                if (time_is_after_jiffies(waitfor)) {
                        long msecs = jiffies_to_msecs(waitfor - jiffies);

                        if (likely(msecs > 0))
                                msleep(msecs);
                }
        }
}

#else

#define mmc_blk_simulate_delay(mq, req, waitfor)

#endif

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
};

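/*
 * Copy a struct mmc_ioc_cmd and its data buffer in from user space.
 * Returns an ERR_PTR() on failure; on success the caller is responsible
 * for freeing both idata->buf and idata itself.
 */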
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kmalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes) {
                idata->buf = NULL;
                return idata;
        }

        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                           idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
                                      struct mmc_blk_ioc_data *idata)
{
        struct mmc_ioc_cmd *ic = &idata->ic;

        if (copy_to_user(&(ic_ptr->response), ic->response,
                         sizeof(ic->response)))
                return -EFAULT;

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
                                 idata->buf, idata->buf_bytes))
                        return -EFAULT;
        }

        return 0;
}

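/*
 * Poll CMD13 (SEND_STATUS) until the card reports that the RPMB
 * programming operation has completed, sleeping between polls.  Gives up
 * with -EPERM once retries_max attempts have been made.
 */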
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
                                       u32 retries_max)
{
        int err;
        u32 retry_count = 0;

        if (!status || !retries_max)
                return -EINVAL;

        do {
                err = get_card_status(card, status, 5);
                if (err)
                        break;

                if (!R1_STATUS(*status) &&
                    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
                        break; /* RPMB programming operation complete */

                /*
                 * Reschedule to give the MMC device a chance to continue
                 * processing the previous command without being polled too
                 * frequently.
                 */
                usleep_range(1000, 5000);
        } while (++retry_count < retries_max);

        if (retry_count == retries_max)
                err = -EPERM;

        return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
        int err;

        if (!mmc_can_sanitize(card)) {
                pr_warn("%s: %s - SANITIZE is not supported\n",
                        mmc_hostname(card->host), __func__);
                err = -EOPNOTSUPP;
                goto out;
        }

        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
                 mmc_hostname(card->host), __func__);

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_SANITIZE_START, 1,
                         MMC_SANITIZE_REQ_TIMEOUT);

        if (err)
                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
                       mmc_hostname(card->host), __func__, err);

        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
                 __func__);
out:
        return err;
}

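/*
 * Execute one user-supplied command: build the mmc_request (with an
 * optional data transfer), switch to the partition backing @md, handle
 * application commands and SANITIZE specially, then wait for completion
 * and stash the response in @idata.
 */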
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
{
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err;

        if (!card || !md || !idata)
                return -EINVAL;

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        err = mmc_blk_part_switch(card, md);
        if (err)
                return err;

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        return err;
        }

        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
            (cmd.opcode == MMC_SWITCH)) {
                err = ioctl_do_sanitize(card);

                if (err)
                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                               __func__, err);

                return err;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                        __func__, cmd.error);
                return cmd.error;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                        __func__, data.error);
                return data.error;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

        return err;
}

struct mmc_blk_ioc_rpmb_data {
        struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
};

static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
        struct mmc_ioc_rpmb __user *user)
{
        struct mmc_blk_ioc_rpmb_data *idata;
        int err, i;

        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
                idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
                if (IS_ERR(idata->data[i])) {
                        err = PTR_ERR(idata->data[i]);
                        goto copy_err;
                }
        }

        return idata;

copy_err:
        while (--i >= 0) {
                kfree(idata->data[i]->buf);
                kfree(idata->data[i]);
        }
        kfree(idata);
out:
        return ERR_PTR(err);
}

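/*
 * Service MMC_IOC_RPMB_CMD: run a sequence of up to MMC_IOC_MAX_RPMB_CMD
 * commands against the RPMB partition, preceding each command with
 * SET_BLOCK_COUNT and polling CMD13 afterwards to ensure completion.
 */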
static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
        struct mmc_ioc_rpmb __user *ic_ptr)
{
        struct mmc_blk_ioc_rpmb_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err = 0, i = 0;
        u32 status = 0;

        /* The caller must have CAP_SYS_RAWIO */
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        md = mmc_blk_get(bdev->bd_disk);
        if (!md)
                return -EINVAL;

        /* make sure this is an rpmb partition */
        if (!(md->area_type & MMC_BLK_DATA_AREA_RPMB)) {
                err = -EINVAL;
                goto cmd_done;
        }

        idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
        if (IS_ERR(idata)) {
                err = PTR_ERR(idata);
                goto cmd_done;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto idata_free;
        }

        mmc_get_card(card);

        err = mmc_blk_part_switch(card, md);
        if (err)
                goto cmd_rel_host;

        for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
                struct mmc_blk_ioc_data *curr_data;
                struct mmc_ioc_cmd *curr_cmd;

                curr_data = idata->data[i];
                curr_cmd = &curr_data->ic;
                if (!curr_cmd->opcode)
                        break;

                cmd.opcode = curr_cmd->opcode;
                cmd.arg = curr_cmd->arg;
                cmd.flags = curr_cmd->flags;

                if (curr_data->buf_bytes) {
                        data.sg = &sg;
                        data.sg_len = 1;
                        data.blksz = curr_cmd->blksz;
                        data.blocks = curr_cmd->blocks;

                        sg_init_one(data.sg, curr_data->buf,
                                    curr_data->buf_bytes);

                        if (curr_cmd->write_flag)
                                data.flags = MMC_DATA_WRITE;
                        else
                                data.flags = MMC_DATA_READ;

                        /* data.flags must already be set before doing this. */
                        mmc_set_data_timeout(&data, card);

                        /*
                         * Allow overriding the timeout_ns for empirical tuning.
                         */
                        if (curr_cmd->data_timeout_ns)
                                data.timeout_ns = curr_cmd->data_timeout_ns;

                        mrq.data = &data;
                }

                mrq.cmd = &cmd;

                err = mmc_set_blockcount(card, data.blocks,
                                         curr_cmd->write_flag & (1 << 31));
                if (err)
                        goto cmd_rel_host;

                mmc_wait_for_req(card->host, &mrq);

                if (cmd.error) {
                        dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                __func__, cmd.error);
                        err = cmd.error;
                        goto cmd_rel_host;
                }
                if (data.error) {
                        dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                __func__, data.error);
                        err = data.error;
                        goto cmd_rel_host;
                }

                if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
                                 sizeof(cmd.resp))) {
                        err = -EFAULT;
                        goto cmd_rel_host;
                }

                if (!curr_cmd->write_flag) {
                        if (copy_to_user((void __user *)(unsigned long)
                                         curr_cmd->data_ptr,
                                         curr_data->buf,
                                         curr_data->buf_bytes)) {
                                err = -EFAULT;
                                goto cmd_rel_host;
                        }
                }

                /*
                 * Ensure RPMB command has completed by polling CMD13
                 * "Send Status".
                 */
                err = ioctl_rpmb_card_status_poll(card, &status, 5);
                if (err)
                        dev_err(mmc_dev(card->host),
                                "%s: Card Status=0x%08X, error %d\n",
                                __func__, status, err);
        }

cmd_rel_host:
        mmc_put_card(card);

idata_free:
        for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
                kfree(idata->data[i]->buf);
                kfree(idata->data[i]);
        }
        kfree(idata);

cmd_done:
        mmc_blk_put(md);
        return err;
}

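/*
 * Service MMC_IOC_CMD: copy the command in from user space, execute it
 * with the card claimed, and copy the response and any read data back.
 */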
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                             struct mmc_ioc_cmd __user *ic_ptr)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        int err = 0, ioc_err = 0;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR_OR_NULL(idata))
                return PTR_ERR(idata);

        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
                goto cmd_err;
        }

        card = md->queue.card;
        if (IS_ERR_OR_NULL(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        mmc_get_card(card);

        ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

        /* Always switch back to main area after RPMB access */
        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
                mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

        mmc_put_card(card);

        err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
        mmc_blk_put(md);
cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return ioc_err ? ioc_err : err;
}

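/*
 * Service MMC_IOC_MULTI_CMD: copy in an array of commands, execute them
 * all under a single claim of the card, then copy responses and read data
 * back out to user space.
 */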
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
                                   struct mmc_ioc_multi_cmd __user *user)
{
        struct mmc_blk_ioc_data **idata = NULL;
        struct mmc_ioc_cmd __user *cmds = user->cmds;
        struct mmc_card *card;
        struct mmc_blk_data *md;
        int i, err = 0, ioc_err = 0;
        __u64 num_of_cmds;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;

        if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
                           sizeof(num_of_cmds)))
                return -EFAULT;

        if (num_of_cmds > MMC_IOC_MAX_CMDS)
                return -EINVAL;

        idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
        if (!idata)
                return -ENOMEM;

        for (i = 0; i < num_of_cmds; i++) {
                idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
                if (IS_ERR(idata[i])) {
                        err = PTR_ERR(idata[i]);
                        num_of_cmds = i;
                        goto cmd_err;
                }
        }

        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
                goto cmd_err;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        mmc_get_card(card);

        for (i = 0; i < num_of_cmds && !ioc_err; i++)
                ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

        /* Always switch back to main area after RPMB access */
        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
                mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

        mmc_put_card(card);

        /* copy to user if data and response */
        for (i = 0; i < num_of_cmds && !err; i++)
                err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
        mmc_blk_put(md);
cmd_err:
        for (i = 0; i < num_of_cmds; i++) {
                kfree(idata[i]->buf);
                kfree(idata[i]);
        }
        kfree(idata);
        return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case MMC_IOC_CMD:
                return mmc_blk_ioctl_cmd(bdev,
                                (struct mmc_ioc_cmd __user *)arg);
        case MMC_IOC_RPMB_CMD:
                return mmc_blk_ioctl_rpmb_cmd(bdev,
                                (struct mmc_ioc_rpmb __user *)arg);
        case MMC_IOC_MULTI_CMD:
                return mmc_blk_ioctl_multi_cmd(bdev,
                                (struct mmc_ioc_multi_cmd __user *)arg);
        default:
                return -EINVAL;
        }
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};

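/*
 * Enable or disable the eMMC command queue (EXT_CSD_CMDQ) when the host,
 * the card and this block device all support it; a no-op if the queue is
 * already in the requested state.
 */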
static int mmc_blk_cmdq_switch(struct mmc_card *card,
                               struct mmc_blk_data *md, bool enable)
{
        int ret = 0;
        bool cmdq_mode = !!mmc_card_cmdq(card);

        if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
            !card->ext_csd.cmdq_support ||
            (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
            (cmdq_mode == enable))
                return 0;

        if (enable) {
                ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
                if (ret) {
                        pr_err("%s: failed (%d) to set block-size to %d\n",
                               __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
                        goto out;
                }
        }

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_CMDQ, enable,
                         card->ext_csd.generic_cmd6_time);
        if (ret) {
                pr_err("%s: cmdq mode %sable failed %d\n",
                       md->disk->disk_name, enable ? "en" : "dis", ret);
                goto out;
        }

        if (enable)
                mmc_card_set_cmdq(card);
        else
                mmc_card_clr_cmdq(card);
out:
        return ret;
}

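/*
 * Select the hardware partition that backs @md by reprogramming
 * EXT_CSD_PART_CONFIG, disabling command queueing for non-user partitions
 * and pausing re-tuning around RPMB accesses.
 */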
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

        if ((main_md->part_curr == md->part_type) &&
            (card->part_curr == md->part_type))
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                if (md->part_type) {
                        /* disable CQ mode for non-user data partitions */
                        ret = mmc_blk_cmdq_switch(card, md, false);
                        if (ret)
                                return ret;
                }

                if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
                        mmc_retune_pause(card->host);

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret) {
                        if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
                                mmc_retune_unpause(card->host);
                        return ret;
                }

                card->ext_csd.part_config = part_config;
                card->part_curr = md->part_type;

                if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
                        mmc_retune_unpause(card->host);
        }

        main_md->part_curr = md->part_type;
        return 0;
}

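/*
 * Ask an SD card how many blocks of the previous write were successfully
 * programmed (ACMD22, SEND_NUM_WR_BLKS).  Returns (u32)-1 on any failure.
 */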
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}

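/*
 * Poll the card until it is ready for data and has left the programming
 * state, or until @timeout_ms expires.  Busy waiting may be left to the
 * host controller when it supports MMC_CAP_WAIT_WHILE_BUSY.
 */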
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
                bool hw_busy_detect, struct request *req, int *gen_err)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;
        u32 status;

        do {
                err = get_card_status(card, &status, 5);
                if (err) {
                        pr_err("%s: error %d requesting status\n",
                               req->rq_disk->disk_name, err);
                        return err;
                }

                if (status & R1_ERROR) {
                        pr_err("%s: %s: error sending status cmd, status %#x\n",
                               req->rq_disk->disk_name, __func__, status);
                        *gen_err = 1;
                }

                /* We may rely on the host hw to handle busy detection. */
                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
                    hw_busy_detect)
                        break;

                /*
                 * Timeout if the device never becomes ready for data and never
                 * leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s %s\n",
                               mmc_hostname(card->host),
                               req->rq_disk->disk_name, __func__);
                        return -ETIMEDOUT;
                }

                /*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
        } while (!(status & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

        return err;
}

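/*
 * Send CMD12 (STOP_TRANSMISSION), using an R1B response with hardware busy
 * detection where the host's max_busy_timeout allows it, and for writes
 * wait until the card has finished programming.
 */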
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
                struct request *req, int *gen_err, u32 *stop_status)
{
        struct mmc_host *host = card->host;
        struct mmc_command cmd = {0};
        int err;
        bool use_r1b_resp = rq_data_dir(req) == WRITE;

        /*
         * Normally we use R1B responses for WRITE, but in cases where the host
         * has specified a max_busy_timeout we need to validate it. A failure
         * means we need to prevent the host from doing hw busy detection, which
         * is done by converting to a R1 response instead.
         */
        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        if (use_r1b_resp) {
                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, 5);
        if (err)
                return err;

        *stop_status = cmd.resp[0];

        /* No need to check card status in case of READ. */
        if (rq_data_dir(req) == READ)
                return 0;

        if (!mmc_host_is_spi(host) &&
            (*stop_status & R1_ERROR)) {
                pr_err("%s: %s: general error sending stop command, resp %#x\n",
                       req->rq_disk->disk_name, __func__, *stop_status);
                *gen_err = 1;
        }

        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0

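/*
 * Classify a command error as ERR_RETRY or ERR_ABORT from the error code
 * and, for timeouts, from whether the card status points at a transient
 * condition.
 */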
1553static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1554 bool status_valid, u32 status)
1555{
1556 switch (error) {
1557 case -EILSEQ:
1558 /* response crc error, retry the r/w cmd */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001559 pr_err_ratelimited(
1560 "%s: response CRC error sending %s command, card status %#x\n",
1561 req->rq_disk->disk_name,
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001562 name, status);
1563 return ERR_RETRY;
1564
1565 case -ETIMEDOUT:
Talel Shenhar0821fe852015-01-28 14:44:57 +02001566 pr_err_ratelimited(
1567 "%s: timed out sending %s command, card status %#x\n",
1568 req->rq_disk->disk_name, name, status);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001569
1570 /* If the status cmd initially failed, retry the r/w cmd */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301571 if (!status_valid) {
Talel Shenhar0821fe852015-01-28 14:44:57 +02001572 pr_err_ratelimited("%s: status not valid, retrying timeout\n",
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301573 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001574 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301575 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001576
1577 /*
1578 * If it was a r/w cmd crc error, or illegal command
1579 * (eg, issued in wrong state) then retry - we should
1580 * have corrected the state problem above.
1581 */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301582 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
Talel Shenhar0821fe852015-01-28 14:44:57 +02001583 pr_err_ratelimited(
1584 "%s: command error, retrying timeout\n",
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301585 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001586 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301587 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001588
1589 /* Otherwise abort the command */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001590 pr_err_ratelimited(
1591 "%s: not retrying timeout\n",
1592 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001593 return ERR_ABORT;
1594
1595 default:
1596 /* We don't understand the error code the driver gave us */
Talel Shenhar0821fe852015-01-28 14:44:57 +02001597 pr_err_ratelimited(
1598 "%s: unknown error %d sending read/write command, card status %#x\n",
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001599 req->rq_disk->disk_name, error, status);
1600 return ERR_ABORT;
1601 }
1602}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag general errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN).
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
					 prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
					 prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	if (err && err != -EOPNOTSUPP) {
		/* We failed to reset so we need to abort the request */
		pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
		       __func__, err);
		return -ENODEV;
	}

	/* Ensure we switch back to the correct partition */
	if (host->card) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
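
/*
 * Example of the reset_done bookkeeping above (illustrative): after a failed
 * discard, mmc_blk_reset(md, host, MMC_BLK_DISCARD) sets the MMC_BLK_DISCARD
 * bit in md->reset_done, so a second reset attempt for the same request type
 * returns -EEXIST instead of resetting the card again. Only a later
 * successful discard, via mmc_blk_reset_success(md, MMC_BLK_DISCARD), clears
 * the bit and re-arms the reset path for that type.
 */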

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret == -ENODEV) {
		pr_err("%s: %s: restart mmc card\n",
		       req->rq_disk->disk_name, __func__);
		if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
			pr_err("%s: %s: fail to restart mmc\n",
			       req->rq_disk->disk_name, __func__);
		else
			mmc_blk_reset_success(md, MMC_BLK_FLUSH);
	}

	if (ret) {
		pr_err("%s: %s: notify flush error to upper layers\n",
		       req->rq_disk->disk_name, __func__);
		ret = -EIO;
	}

#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
	else if (atomic_read(&mq->cache_size)) {
		long used = mmc_blk_cache_used(mq, jiffies);

		if (used) {
			int speed = atomic_read(&mq->max_write_speed);

			if (speed_valid(speed)) {
				unsigned long msecs = jiffies_to_msecs(
					size_and_speed_to_jiffies(
						used, speed));
				if (msecs)
					msleep(msecs);
			}
		}
	}
#endif
	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
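
/*
 * Worked example of the legacy clamping above (illustrative numbers): with
 * rel_sectors == 8, a 13-block write at an 8-aligned address is trimmed to
 * 8 blocks, a 5-block write collapses to a single block, and any write whose
 * start address is not 8-aligned also collapses to a single block; the rest
 * of the request is then finished through partial completions.
 */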

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for that to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
				       &gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* If a general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			return MMC_BLK_ABORT;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
					ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
		kfree(ext_csd);
	}

	return check;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);

	brq->data.fault_injected = false;
	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for requests
	 * that are too big.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					  MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					  MMC_CMD_AC;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * The data tag is used only when writing meta data, to speed up
	 * the write and any subsequent reads of that meta data.
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);
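
	/*
	 * Example (illustrative numbers): with a data_tag_unit_size of
	 * 4096 bytes, a REQ_META write of 8 blocks (8 * 512 == 4096 bytes)
	 * qualifies for the data tag, while a 4-block meta data write
	 * (2048 bytes) does not.
	 */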

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.mrq->req = mqrq->req;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}
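
/*
 * Worked example for mmc_calc_packed_hdr_segs() (illustrative numbers): a
 * large-sector card has a 4096-byte packed header, so a 1024-byte maximum
 * segment size yields 4 header segments, while max_seg_sz >= 4096 lets the
 * header fit in a single segment.
 */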

/**
 * mmc_blk_disable_wr_packing() - disables packing mode
 * @mq:	MMC queue.
 *
 */
void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
{
	if (mq) {
		mq->wr_packing_enabled = false;
		mq->num_of_potential_packed_wr_reqs = 0;
	}
}
EXPORT_SYMBOL(mmc_blk_disable_wr_packing);

static int get_packed_trigger(int potential, struct mmc_card *card,
			      struct request *req, int curr_trigger)
{
	static int num_mean_elements = 1;
	static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
	unsigned int trigger = curr_trigger;
	unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;

	/* scale down the upper bound to 75% */
	pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;

	/*
	 * Since the most common calls for this function are with small
	 * potential write values, and since we don't want these calls to
	 * affect the packed trigger, set a lower bound and ignore calls
	 * with potential lower than that bound.
	 */
	if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
		return trigger;

	/*
	 * This is to prevent integer overflow in the following calculation:
	 * once every PACKED_TRIGGER_MAX_ELEMENTS, reset the algorithm.
	 */
	if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
		num_mean_elements = 1;
		mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
	}

	/*
	 * Get the next mean value based on the previous mean value and the
	 * current potential packed writes. The calculation is as follows:
	 * mean_pot[i+1] =
	 *	((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
	 */
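	/*
	 * Worked example (illustrative numbers): with mean_potential == 4
	 * built from num_mean_elements == 3 samples and a new potential of
	 * 16, the code below computes (4 * 3 + 3 + 16) / 4 == 31 / 4 == 7;
	 * the "+ 3" is the num_mean_elements correction applied because
	 * potential exceeds the scaled mean, and the
	 * PCKD_TRGR_PRECISION_MULTIPLIER scaling preserves the fraction
	 * until the final division.
	 */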
	mean_potential *= num_mean_elements;
	/*
	 * Add num_mean_elements so that the division of two integers doesn't
	 * lower mean_potential too much.
	 */
	if (potential > mean_potential)
		mean_potential += num_mean_elements;
	mean_potential += potential;
	/* this is for gaining more precision when dividing two integers */
	mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
	/* this completes the mean calculation */
	mean_potential /= ++num_mean_elements;
	mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;

	/*
	 * If the current potential packed-write count is greater than the
	 * mean potential, the heuristic is that the following workload will
	 * contain many write requests, therefore we lower the packed trigger.
	 * In the opposite case we want to increase the trigger in order to
	 * get fewer packing events.
	 */
	if (potential >= mean_potential)
		trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
				PCKD_TRGR_LOWER_BOUND : trigger - 1;
	else
		trigger = (trigger >= pckd_trgr_upper_bound) ?
				pckd_trgr_upper_bound : trigger + 1;

	/*
	 * An urgent read request indicates a packed list being interrupted
	 * by this read, therefore we aim for less packing, hence the trigger
	 * gets increased.
	 */
	if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
		trigger += PCKD_TRGR_URGENT_PENALTY;

	return trigger;
}

static void mmc_blk_write_packing_control(struct mmc_queue *mq,
					  struct request *req)
{
	struct mmc_host *host = mq->card->host;
	int data_dir;

	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
		return;

	/* Support for the write packing on eMMC 4.5 or later */
	if (mq->card->ext_csd.rev <= 5)
		return;

	/*
	 * If packing control is not supported by the host, it must not
	 * affect the write packing. Therefore we have to keep write
	 * packing enabled.
	 */
	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
		mq->wr_packing_enabled = true;
		return;
	}

	if (!req || (req && (req->cmd_flags & REQ_PREFLUSH))) {
		if (mq->num_of_potential_packed_wr_reqs >
				mq->num_wr_reqs_to_start_packing)
			mq->wr_packing_enabled = true;
		mq->num_wr_reqs_to_start_packing =
			get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
					   mq->card, req,
					   mq->num_wr_reqs_to_start_packing);
		mq->num_of_potential_packed_wr_reqs = 0;
		return;
	}

	data_dir = rq_data_dir(req);

	if (data_dir == READ) {
		mmc_blk_disable_wr_packing(mq);
		mq->num_wr_reqs_to_start_packing =
			get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
					   mq->card, req,
					   mq->num_wr_reqs_to_start_packing);
		mq->num_of_potential_packed_wr_reqs = 0;
		mq->wr_packing_enabled = false;
		return;
	} else if (data_dir == WRITE) {
		mq->num_of_potential_packed_wr_reqs++;
	}

	if (mq->num_of_potential_packed_wr_reqs >
			mq->num_wr_reqs_to_start_packing)
		mq->wr_packing_enabled = true;
}
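
/*
 * Illustrative scenario for the control logic above (hypothetical numbers):
 * with num_wr_reqs_to_start_packing == 17, a stream of 20 consecutive writes
 * enables packing once the 18th write is counted. A subsequent read disables
 * packing, recomputes the trigger from the 20 writes seen so far and resets
 * the counter, so packing is re-enabled only after the (possibly new)
 * trigger is exceeded again.
 */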

struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
{
	if (!card)
		return NULL;

	return &card->wr_pack_stats;
}
EXPORT_SYMBOL(mmc_blk_get_packed_statistics);

void mmc_blk_init_packed_statistics(struct mmc_card *card)
{
	int max_num_of_packed_reqs = 0;

	if (!card || !card->wr_pack_stats.packing_events)
		return;

	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;

	spin_lock(&card->wr_pack_stats.lock);
	memset(card->wr_pack_stats.packing_events, 0,
	       (max_num_of_packed_reqs + 1) *
	       sizeof(*card->wr_pack_stats.packing_events));
	memset(&card->wr_pack_stats.pack_stop_reason, 0,
	       sizeof(card->wr_pack_stats.pack_stop_reason));
	card->wr_pack_stats.enabled = true;
	spin_unlock(&card->wr_pack_stats.lock);
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);

static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;
	struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;

	/*
	 * There is no need to check mqrq->packed for any further packed
	 * operation: if it is NULL we set MMC_PACKED_NONE and return zero
	 * reqs. We also clear the MMC_BLK_PACKED_CMD flag to avoid doing
	 * this again when removing the blk request.
	 */
	if (!mqrq->packed) {
		md->flags &= (~MMC_BLK_PACKED_CMD);
		goto no_packed;
	}

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if (!mq->wr_packing_enabled)
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	if (cur->cmd_flags & REQ_FUA)
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	spin_lock(&stats->lock);
	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8)) {
			MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
			break;
		}

		if (req_op(next) == REQ_OP_DISCARD ||
		    req_op(next) == REQ_OP_SECURE_ERASE ||
		    req_op(next) == REQ_OP_FLUSH) {
			if (req_op(next) != REQ_OP_SECURE_ERASE)
				MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
			break;
		}

		if (next->cmd_flags & REQ_FUA) {
			MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
			break;
		}

		if (rq_data_dir(cur) != rq_data_dir(next)) {
			MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
			break;
		}

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
			MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
			break;
		}

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count) {
			if (stats->enabled)
				stats->pack_stop_reason[EXCEEDS_SECTORS]++;
			break;
		}

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs) {
			MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
			break;
		}

		if (mq->no_pack_for_random) {
			if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
			    blk_rq_pos(next)) {
				MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
				put_back = true;
				break;
			}
		}

		if (rq_data_dir(next) == WRITE)
			mq->num_of_potential_packed_wr_reqs++;
		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (stats->enabled) {
		if (reqs + 1 <= card->ext_csd.max_packed_writes)
			stats->packing_events[reqs + 1]++;
		if (reqs + 1 == max_packed_rw)
			MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
	}

	spin_unlock(&stats->lock);

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}

static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	__le32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] = cpu_to_le32(
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq));
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[(i * 2) + 1] = cpu_to_le32(
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}
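
	/*
	 * Sketch of the resulting header layout (illustrative, assuming
	 * PACKED_CMD_VER == 0x01 and PACKED_CMD_WR == 0x02): for two packed
	 * entries, word 0 is 0x00020201 (nr_entries in bits 23:16, the WR
	 * direction in bits 15:8, the version in bits 7:0), and each entry
	 * then contributes a pair of words: its CMD23 argument followed by
	 * its CMD25 start address.
	 */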

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags = MMC_DATA_WRITE;
	brq->data.fault_injected = false;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;

	/*
	 * This is intended for use by packed-command tests; when these
	 * functions are not in use, the respective pointers are NULL.
	 */
	if (mq->err_check_fn)
		mqrq->mmc_active.err_check = mq->err_check_fn;
	else
		mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	if (mq->packed_test_fn)
		mq->packed_test_fn(mq->queue, mqrq);

	mmc_queue_bounce_pre(mqrq);
}

static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;

	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still mark the written sectors
	 * reported by the controller as ok (the count might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		if (!brq->data.fault_injected) {
			blocks = mmc_sd_num_wr_blocks(card);
			if (blocks != (u32)-1)
				ret = blk_end_request(req, 0, blocks << 9);
		} else
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}

static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}
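
/*
 * Illustrative walk-through of the retry-from-index logic above: if a packed
 * group of 5 requests fails with idx_failure == 2, the first two requests
 * are completed successfully, mq_rq->req is pointed at the third, and
 * nr_entries drops to 3, so the transfer is reissued from the failing entry
 * onwards.
 */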

static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}

static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}

static int mmc_blk_cmdq_start_req(struct mmc_host *host,
				  struct mmc_cmdq_req *cmdq_req)
{
	struct mmc_request *mrq = &cmdq_req->mrq;

	mrq->done = mmc_blk_cmdq_req_done;
	return mmc_cmdq_start_req(host, cmdq_req);
}

#define IS_RT_CLASS_REQ(x)	\
	(IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)

static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
		struct mmc_queue_req *mqrq, struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
	bool do_data_tag;
	bool read_dir = (rq_data_dir(req) == READ);
	bool prio = IS_RT_CLASS_REQ(req);
	struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;

	memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));

	cmdq_rq->tag = req->tag;
	if (read_dir) {
		cmdq_rq->cmdq_req_flags |= DIR;
		cmdq_rq->data.flags = MMC_DATA_READ;
	} else {
		cmdq_rq->data.flags = MMC_DATA_WRITE;
	}
	if (prio)
		cmdq_rq->cmdq_req_flags |= PRIO;

	if (do_rel_wr)
		cmdq_rq->cmdq_req_flags |= REL_WR;

	cmdq_rq->data.blocks = blk_rq_sectors(req);
	cmdq_rq->blk_addr = blk_rq_pos(req);
	cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;

	mmc_set_data_timeout(&cmdq_rq->data, card);

	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);
	if (do_data_tag)
		cmdq_rq->cmdq_req_flags |= DAT_TAG;
	cmdq_rq->data.sg = mqrq->sg;
	cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (cmdq_rq->data.blocks > card->host->max_blk_count)
		cmdq_rq->data.blocks = card->host->max_blk_count;

	if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = cmdq_rq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		cmdq_rq->data.sg_len = i;
	}

	mqrq->cmdq_req.cmd_flags = req->cmd_flags;
	mqrq->cmdq_req.mrq.req = mqrq->req;
	mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
	mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
	mqrq->req->special = mqrq;

	pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
		 mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
		 mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
		 cmdq_rq, cmdq_rq->blk_addr,
		 (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);

	return &mqrq->cmdq_req;
}

static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *active_mqrq;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	struct mmc_cmdq_req *mc_rq;
	int ret = 0;

	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));

	active_mqrq = &mq->mqrq_cmdq[req->tag];
	active_mqrq->req = req;

	mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);

	ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
	return ret;
}
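
/*
 * Note on tag bookkeeping (summarizing the code above and below): the bit
 * for req->tag in host->cmdq_ctx.active_reqs is claimed when the request is
 * issued and released in mmc_blk_cmdq_complete_rq(); if the queue was
 * starved for a slot, the req_starved bit causes the queue to be run again
 * once a request completes.
 */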

/* invoked by block layer in softirq context */
void mmc_blk_cmdq_complete_rq(struct request *rq)
{
	struct mmc_queue_req *mq_rq = rq->special;
	struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
	struct mmc_host *host = mrq->host;
	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;

	mmc_cmdq_post_req(host, mrq, err);
	if (err) {
		pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
		       __func__, err);
		set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
		WARN_ON(1);
	}

	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
				   &ctx_info->active_reqs));

	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);

	if (test_and_clear_bit(0, &ctx_info->req_starved))
		blk_run_queue(mq->queue);

	mmc_release_host(host);
}

/*
 * Complete reqs from block layer softirq context
 * Invoked in irq context
 */
void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
{
	struct request *req = mrq->req;

	blk_complete_request(req);
}
EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
2993
Per Forlinee8a43a2011-07-01 18:55:33 +02002994static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02002995{
2996 struct mmc_blk_data *md = mq->data;
2997 struct mmc_card *card = md->queue.card;
2998 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunterb8360a42015-05-07 13:10:24 +03002999 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02003000 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02003001 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303002 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02003003 struct mmc_async_req *areq;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003004 const u8 packed_nr = 2;
3005 u8 reqs = 0;
Mark Salyzyn6904e432016-01-28 11:12:25 -08003006#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3007 unsigned long waitfor = jiffies;
3008#endif
Per Forlinee8a43a2011-07-01 18:55:33 +02003009
3010 if (!rqc && !mq->mqrq_prev->req)
3011 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02003012
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003013 if (rqc)
3014 reqs = mmc_blk_prep_packed_list(mq, rqc);
3015
Per Forlin54d49d72011-07-01 18:55:29 +02003016 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02003017 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303018 /*
3019 * When 4KB native sector is enabled, only transfers that are
3020 * a multiple of 8 blocks (4KB) are allowed
3021 */
Yuan, Juntaoe87c8562016-05-13 07:59:24 +00003022 if (mmc_large_sector(card) &&
3023 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05303024 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
3025 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003026 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05303027 goto cmd_abort;
3028 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003029
3030 if (reqs >= packed_nr)
3031 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
3032 card, mq);
3033 else
3034 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02003035 areq = &mq->mqrq_cur->mmc_active;
3036 } else
3037 areq = NULL;
3038 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003039 if (!areq) {
3040 if (status == MMC_BLK_NEW_REQUEST)
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003041 set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Per Forlinee8a43a2011-07-01 18:55:33 +02003042 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003043 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02003044
Per Forlinee8a43a2011-07-01 18:55:33 +02003045 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
3046 brq = &mq_rq->brq;
3047 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03003048 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02003049 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02003050
Per Forlind78d4a82011-07-01 18:55:30 +02003051 switch (status) {
3052 case MMC_BLK_SUCCESS:
3053 case MMC_BLK_PARTIAL:
3054 /*
3055 * A block was successfully transferred.
3056 */
Adrian Hunter67716322011-08-29 16:42:15 +03003057 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003058
Mark Salyzyn6904e432016-01-28 11:12:25 -08003059 mmc_blk_simulate_delay(mq, rqc, waitfor);
3060
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003061 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3062 ret = mmc_blk_end_packed_req(mq_rq);
3063 break;
3064 } else {
3065 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02003066 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003067 }
3068
Adrian Hunter67716322011-08-29 16:42:15 +03003069 /*
3070 * If the blk_end_request function returns non-zero even
3071 * though all data has been transferred and no errors
3072 * were returned by the host controller, it's a bug.
3073 */
Per Forlinee8a43a2011-07-01 18:55:33 +02003074 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05303075 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02003076 __func__, blk_rq_bytes(req),
3077 brq->data.bytes_xfered);
3078 rqc = NULL;
3079 goto cmd_abort;
3080 }
Per Forlind78d4a82011-07-01 18:55:30 +02003081 break;
3082 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03003083 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
Ding Wang29535f72015-05-18 20:14:15 +08003084 if (mmc_blk_reset(md, card->host, type))
3085 goto cmd_abort;
3086 if (!ret)
3087 goto start_new_req;
3088 break;
Per Forlind78d4a82011-07-01 18:55:30 +02003089 case MMC_BLK_RETRY:
Adrian Hunterb8360a42015-05-07 13:10:24 +03003090 retune_retry_done = brq->retune_retry_done;
Maya Erezf93ca0a2014-12-09 23:34:41 +02003091 if (retry++ < MMC_BLK_MAX_RETRIES)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003092 break;
Adrian Hunter67716322011-08-29 16:42:15 +03003093 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02003094 case MMC_BLK_ABORT:
Maya Erezf93ca0a2014-12-09 23:34:41 +02003095 if (!mmc_blk_reset(md, card->host, type) &&
3096 (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
Adrian Hunter67716322011-08-29 16:42:15 +03003097 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003098 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003099 case MMC_BLK_DATA_ERR: {
3100 int err;
3101
3102 err = mmc_blk_reset(md, card->host, type);
3103 if (!err)
3104 break;
Sahitya Tummalad0a19842014-10-31 09:46:20 +05303105 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03003106 }
3107 case MMC_BLK_ECC_ERR:
3108 if (brq->data.blocks > 1) {
3109 /* Redo read one sector at a time */
Joe Perches66061102014-09-12 14:56:56 -07003110 pr_warn("%s: retrying using single block read\n",
3111 req->rq_disk->disk_name);
Adrian Hunter67716322011-08-29 16:42:15 +03003112 disable_multi = 1;
3113 break;
3114 }
Per Forlind78d4a82011-07-01 18:55:30 +02003115 /*
3116 * After an error, we redo I/O one sector at a
3117 * time, so we only reach here after trying to
3118 * read a single sector.
3119 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303120 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02003121 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02003122 if (!ret)
3123 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02003124 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05303125 case MMC_BLK_NOMEDIUM:
3126 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003127 default:
3128			pr_err("%s: Unhandled return value (%d)\n",
3129 req->rq_disk->disk_name, status);
3130 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01003131 }
3132
Per Forlinee8a43a2011-07-01 18:55:33 +02003133 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003134 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3135 if (!mq_rq->packed->retries)
3136 goto cmd_abort;
3137 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
3138 mmc_start_req(card->host,
3139 &mq_rq->mmc_active, NULL);
3140 } else {
3141
3142 /*
3143				 * In case of an incomplete request,
3144 * prepare it again and resend.
3145 */
3146 mmc_blk_rw_rq_prep(mq_rq, card,
3147 disable_multi, mq);
3148 mmc_start_req(card->host,
3149 &mq_rq->mmc_active, NULL);
3150 }
Adrian Hunterb8360a42015-05-07 13:10:24 +03003151 mq_rq->brq.retune_retry_done = retune_retry_done;
Per Forlinee8a43a2011-07-01 18:55:33 +02003152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 } while (ret);
3154
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 return 1;
3156
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01003157 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003158 if (mmc_packed_cmd(mq_rq->cmd_type)) {
3159 mmc_blk_abort_packed_req(mq_rq);
3160 } else {
3161 if (mmc_card_removed(card))
3162 req->cmd_flags |= REQ_QUIET;
3163 while (ret)
3164 ret = blk_end_request(req, -EIO,
3165 blk_rq_cur_bytes(req));
3166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
Per Forlinee8a43a2011-07-01 18:55:33 +02003168 start_new_req:
3169 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09003170 if (mmc_card_removed(card)) {
3171 rqc->cmd_flags |= REQ_QUIET;
3172 blk_end_request_all(rqc, -EIO);
3173 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003174 /*
3175			 * If the current request is packed, it needs to be put back.
3176 */
3177 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
3178 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
3179
Seungwon Jeon7a819022013-01-22 19:48:07 +09003180 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
3181 mmc_start_req(card->host,
3182 &mq->mqrq_cur->mmc_active, NULL);
3183 }
Per Forlinee8a43a2011-07-01 18:55:33 +02003184 }
3185
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 return 0;
3187}
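/*
 * Illustrative sketch (not part of the driver): the asynchronous
 * "ping-pong" that mmc_blk_issue_rw_rq() above drives. mmc_start_req()
 * submits the freshly prepared request (or NULL) and hands back the
 * previously issued one once the host finishes it, so preparing the
 * next transfer overlaps the current one. Variable names are made up
 * for illustration only.
 */
#if 0
	struct mmc_async_req *prev;
	enum mmc_blk_status status;

	prev = mmc_start_req(card->host, &next_mqrq->mmc_active,
			     (int *)&status);
	if (!prev)
		return;	/* nothing completed yet, e.g. MMC_BLK_NEW_REQUEST */
	/* 'prev' is the request given to mmc_start_req() last time */
#endif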
3188
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003189static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
3190{
3191 int ret;
3192 struct mmc_blk_data *md = mq->data;
3193 struct mmc_card *card = md->queue.card;
3194
3195 mmc_claim_host(card->host);
3196 ret = mmc_blk_part_switch(card, md);
3197 if (ret) {
3198 pr_err("%s: %s: partition switch failed %d\n",
3199 md->disk->disk_name, __func__, ret);
3200 blk_end_request_all(req, ret);
3201 mmc_release_host(card->host);
3202 goto switch_failure;
3203 }
3204
3205 ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
3206
3207switch_failure:
3208 return ret;
3209}
3210
Linus Walleij29eb7bd2016-09-20 11:34:38 +02003211int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
Adrian Hunterbd788c92010-08-11 14:17:47 -07003212{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003213 int ret;
3214 struct mmc_blk_data *md = mq->data;
3215 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003216 struct mmc_host *host = card->host;
3217 unsigned long flags;
Adrian Hunter869c5542016-08-25 14:11:43 -06003218 bool req_is_special = mmc_req_is_special(req);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003219
Per Forlinee8a43a2011-07-01 18:55:33 +02003220 if (req && !mq->mqrq_prev->req)
3221 /* claim host only for the first request */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003222 mmc_get_card(card);
Per Forlinee8a43a2011-07-01 18:55:33 +02003223
Andrei Warkentin371a6892011-04-11 18:10:25 -05003224 ret = mmc_blk_part_switch(card, md);
3225 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003226 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05303227 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03003228 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003229 ret = 0;
3230 goto out;
3231 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003232
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003233 mmc_blk_write_packing_control(mq, req);
3234
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003235 clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Mike Christiec2df40d2016-06-05 14:32:17 -05003236 if (req && req_op(req) == REQ_OP_DISCARD) {
Per Forlinee8a43a2011-07-01 18:55:33 +02003237 /* complete ongoing async transfer before issuing discard */
3238 if (card->host->areq)
3239 mmc_blk_issue_rw_rq(mq, NULL);
Christoph Hellwig288dab82016-06-09 16:00:36 +02003240 ret = mmc_blk_issue_discard_rq(mq, req);
3241 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
3242 /* complete ongoing async transfer before issuing secure erase*/
3243 if (card->host->areq)
3244 mmc_blk_issue_rw_rq(mq, NULL);
Maya Erez0c0609f2014-12-09 23:31:55 +02003245 if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3246 ret = mmc_blk_issue_secdiscard_rq(mq, req);
3247 else
3248 ret = mmc_blk_issue_discard_rq(mq, req);
Mike Christie3a5e02c2016-06-05 14:32:23 -05003249 } else if (req && req_op(req) == REQ_OP_FLUSH) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09003250 /* complete ongoing async transfer before issuing flush */
3251 if (card->host->areq)
3252 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003253 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003254 } else {
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05003255 if (!req && host->areq) {
3256 spin_lock_irqsave(&host->context_info.lock, flags);
3257 host->context_info.is_waiting_last_req = true;
3258 spin_unlock_irqrestore(&host->context_info.lock, flags);
3259 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003260 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003261 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003262
Andrei Warkentin371a6892011-04-11 18:10:25 -05003263out:
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003264 if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
3265 req_is_special)
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09003266 /*
3267 * Release host when there are no more requests
3268		 * and after a special request (discard, flush) is done.
3269		 * In case of a special request, there is no reentry to
3270 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
3271 */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003272 mmc_put_card(card);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003273 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07003274}
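/*
 * Illustrative sketch (not part of the driver): the host claim/release
 * pairing enforced by mmc_blk_issue_rq() above. The card is claimed on
 * the first request of a burst and released only once the queue drains
 * or a special request (discard, flush) finishes, sparing back-to-back
 * requests the claim/release round trip. Condition names are made up.
 */
#if 0
	if (req && !mq->mqrq_prev->req)
		mmc_get_card(card);	/* first request of a burst */
	/* ... issue the request ... */
	if (queue_drained || req_is_special)
		mmc_put_card(card);
#endif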
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275
Russell Kinga6f6c962006-01-03 22:38:44 +00003276static inline int mmc_blk_readonly(struct mmc_card *card)
3277{
3278 return mmc_card_readonly(card) ||
3279 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
3280}
3281
Andrei Warkentin371a6892011-04-11 18:10:25 -05003282static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3283 struct device *parent,
3284 sector_t size,
3285 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003286 const char *subname,
3287 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288{
3289 struct mmc_blk_data *md;
3290 int devidx, ret;
3291
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003292again:
3293 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
3294 return ERR_PTR(-ENOMEM);
3295
3296 spin_lock(&mmc_blk_lock);
3297 ret = ida_get_new(&mmc_blk_ida, &devidx);
3298 spin_unlock(&mmc_blk_lock);
3299
3300 if (ret == -EAGAIN)
3301 goto again;
3302 else if (ret)
3303 return ERR_PTR(ret);
3304
3305 if (devidx >= max_devices) {
3306 ret = -ENOSPC;
3307 goto out;
3308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07003310 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00003311 if (!md) {
3312 ret = -ENOMEM;
3313 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314 }
Russell Kinga6f6c962006-01-03 22:38:44 +00003315
Johan Rudholmadd710e2011-12-02 08:51:06 +01003316 md->area_type = area_type;
3317
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003318 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00003319 * Set the read-only status based on the supported commands
3320 * and the write protect switch.
3321 */
3322 md->read_only = mmc_blk_readonly(card);
3323
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003324 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00003325 if (md->disk == NULL) {
3326 ret = -ENOMEM;
3327 goto err_kfree;
3328 }
3329
3330 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003331 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00003332 md->usage = 1;
3333
Asutosh Das963469b2015-05-21 13:29:51 +05303334 ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
Russell Kinga6f6c962006-01-03 22:38:44 +00003335 if (ret)
3336 goto err_putdisk;
3337
Russell Kinga6f6c962006-01-03 22:38:44 +00003338 md->queue.data = md;
3339
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003340 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003341 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00003342 md->disk->fops = &mmc_bdops;
3343 md->disk->private_data = md;
3344 md->disk->queue = md->queue.queue;
Dan Williams307d8e62016-06-20 10:40:44 -07003345 md->parent = parent;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003346 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Cross382c55f2015-10-22 10:00:41 -07003347 md->disk->flags = GENHD_FL_EXT_DEVT;
Ulf Hanssonf5b4d712014-09-03 11:02:23 +02003348 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
Loic Pallardy53d8f972012-08-06 17:12:28 +02003349 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00003350
3351 /*
3352 * As discussed on lkml, GENHD_FL_REMOVABLE should:
3353 *
3354 * - be set for removable media with permanent block devices
3355 * - be unset for removable block devices with permanent media
3356 *
3357 * Since MMC block devices clearly fall under the second
3358 * case, we do not set GENHD_FL_REMOVABLE. Userspace
3359 * should use the block device creation/destruction hotplug
3360 * messages to tell when the card is present.
3361 */
3362
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003363 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
Ulf Hansson9aaf3432016-04-06 16:12:08 +02003364 "mmcblk%u%s", card->host->index, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00003365
Saugata Dasa5075eb2012-05-17 16:32:21 +05303366 if (mmc_card_mmc(card))
3367 blk_queue_logical_block_size(md->queue.queue,
3368 card->ext_csd.data_sector_size);
3369 else
3370 blk_queue_logical_block_size(md->queue.queue, 512);
3371
Andrei Warkentin371a6892011-04-11 18:10:25 -05003372 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003373
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003374 if (mmc_host_cmd23(card->host)) {
Daniel Glöckner0ed50ab2016-08-30 14:17:30 +02003375 if ((mmc_card_mmc(card) &&
3376 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003377 (mmc_card_sd(card) &&
3378 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
3379 md->flags |= MMC_BLK_CMD23;
3380 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003381
3382 if (mmc_card_mmc(card) &&
3383 md->flags & MMC_BLK_CMD23 &&
3384 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003385 card->ext_csd.rel_sectors) && !card->cmdq_init) {
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003386 md->flags |= MMC_BLK_REL_WR;
Jens Axboee9d5c742016-03-30 10:17:20 -06003387 blk_queue_write_cache(md->queue.queue, true, true);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003388 }
3389
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003390 if (card->cmdq_init) {
3391 md->flags |= MMC_BLK_CMD_QUEUE;
3392 md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
3393 md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
3394 }
3395
3396 if (mmc_card_mmc(card) && !card->cmdq_init &&
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003397 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
3398 (md->flags & MMC_BLK_CMD23) &&
3399 card->ext_csd.packed_event_en) {
3400 if (!mmc_packed_init(&md->queue, card))
3401 md->flags |= MMC_BLK_PACKED_CMD;
3402 }
3403
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00003405
3406 err_putdisk:
3407 put_disk(md->disk);
3408 err_kfree:
3409 kfree(md);
3410 out:
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003411 spin_lock(&mmc_blk_lock);
3412 ida_remove(&mmc_blk_ida, devidx);
3413 spin_unlock(&mmc_blk_lock);
Russell Kinga6f6c962006-01-03 22:38:44 +00003414 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415}
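/*
 * Illustrative sketch (not part of the driver): the ida_pre_get() /
 * ida_get_new() retry loop used in mmc_blk_alloc_req() above, reduced
 * to its skeleton. -EAGAIN means the preallocated node was consumed by
 * a concurrent allocator, so preallocate and retry. demo_get_id() is a
 * made-up name for illustration only.
 */
#if 0
static int demo_get_id(struct ida *ida, spinlock_t *lock)
{
	int id, ret;

	do {
		if (!ida_pre_get(ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(lock);
		ret = ida_get_new(ida, &id);
		spin_unlock(lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}
#endif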
3416
Andrei Warkentin371a6892011-04-11 18:10:25 -05003417static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
3418{
3419 sector_t size;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003420
3421 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
3422 /*
3423		 * The EXT_CSD sector count is in number of 512 byte
3424 * sectors.
3425 */
3426 size = card->ext_csd.sectors;
3427 } else {
3428 /*
3429 * The CSD capacity field is in units of read_blkbits.
3430 * set_capacity takes units of 512 bytes.
3431 */
Kuninori Morimoto087de9e2015-05-11 07:35:28 +00003432 size = (typeof(sector_t))card->csd.capacity
3433 << (card->csd.read_blkbits - 9);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003434 }
3435
Tobias Klauser7a30f2a2015-01-21 15:56:44 +01003436 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003437 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003438}
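/*
 * Illustrative sketch (not part of the driver): the unit conversion in
 * mmc_blk_alloc() above. A CSD capacity counted in 2^read_blkbits-byte
 * blocks becomes 512-byte sectors by shifting left (read_blkbits - 9),
 * since 512 == 2^9. E.g. read_blkbits == 10 (1 KiB blocks): 1000
 * blocks -> 2000 sectors. Variable names are made up for illustration.
 */
#if 0
	sector_t sectors = (sector_t)csd_capacity << (read_blkbits - 9);
#endif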
3439
3440static int mmc_blk_alloc_part(struct mmc_card *card,
3441 struct mmc_blk_data *md,
3442 unsigned int part_type,
3443 sector_t size,
3444 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003445 const char *subname,
3446 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05003447{
3448 char cap_str[10];
3449 struct mmc_blk_data *part_md;
3450
3451 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003452 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003453 if (IS_ERR(part_md))
3454 return PTR_ERR(part_md);
3455 part_md->part_type = part_type;
3456 list_add(&part_md->part, &md->part);
3457
James Bottomleyb9f28d82015-03-05 18:47:01 -08003458 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
Andrei Warkentin371a6892011-04-11 18:10:25 -05003459 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303460 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05003461 part_md->disk->disk_name, mmc_card_id(card),
3462 mmc_card_name(card), part_md->part_type, cap_str);
3463 return 0;
3464}
3465
Namjae Jeone0c368d2011-10-06 23:41:38 +09003466/* MMC Physical partitions consist of two boot partitions and
3467 * up to four general purpose partitions.
3468 * For each partition enabled in EXT_CSD a block device will be allocated
3469 * to provide access to the partition.
3470 */
3471
Andrei Warkentin371a6892011-04-11 18:10:25 -05003472static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3473{
Namjae Jeone0c368d2011-10-06 23:41:38 +09003474 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003475
3476 if (!mmc_card_mmc(card))
3477 return 0;
3478
Namjae Jeone0c368d2011-10-06 23:41:38 +09003479 for (idx = 0; idx < card->nr_parts; idx++) {
3480 if (card->part[idx].size) {
3481 ret = mmc_blk_alloc_part(card, md,
3482 card->part[idx].part_cfg,
3483 card->part[idx].size >> 9,
3484 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003485 card->part[idx].name,
3486 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09003487 if (ret)
3488 return ret;
3489 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003490 }
3491
3492 return ret;
3493}
3494
Andrei Warkentin371a6892011-04-11 18:10:25 -05003495static void mmc_blk_remove_req(struct mmc_blk_data *md)
3496{
Johan Rudholmadd710e2011-12-02 08:51:06 +01003497 struct mmc_card *card;
3498
Andrei Warkentin371a6892011-04-11 18:10:25 -05003499 if (md) {
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003500 /*
3501 * Flush remaining requests and free queues. It
3502 * is freeing the queue that stops new requests
3503 * from being accepted.
3504 */
Franck Jullien8efb83a2013-07-24 15:17:48 +02003505 card = md->queue.card;
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003506 mmc_cleanup_queue(&md->queue);
3507 if (md->flags & MMC_BLK_PACKED_CMD)
3508 mmc_packed_clean(&md->queue);
Venkat Gopalakrishnane95d7bf2015-05-29 16:51:43 -07003509 if (md->flags & MMC_BLK_CMD_QUEUE)
3510 mmc_cmdq_clean(&md->queue, card);
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003511 device_remove_file(disk_to_dev(md->disk),
3512 &md->num_wr_reqs_to_start_packing);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003513 if (md->disk->flags & GENHD_FL_UP) {
3514 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003515 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
3516 card->ext_csd.boot_ro_lockable)
3517 device_remove_file(disk_to_dev(md->disk),
3518 &md->power_ro_lock);
Mark Salyzyn6904e432016-01-28 11:12:25 -08003519#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3520 device_remove_file(disk_to_dev(md->disk),
3521 &dev_attr_max_write_speed);
3522 device_remove_file(disk_to_dev(md->disk),
3523 &dev_attr_max_read_speed);
3524 device_remove_file(disk_to_dev(md->disk),
3525 &dev_attr_cache_size);
3526#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05003527
Andrei Warkentin371a6892011-04-11 18:10:25 -05003528 del_gendisk(md->disk);
3529 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003530 mmc_blk_put(md);
3531 }
3532}
3533
3534static void mmc_blk_remove_parts(struct mmc_card *card,
3535 struct mmc_blk_data *md)
3536{
3537 struct list_head *pos, *q;
3538 struct mmc_blk_data *part_md;
3539
3540 list_for_each_safe(pos, q, &md->part) {
3541 part_md = list_entry(pos, struct mmc_blk_data, part);
3542 list_del(pos);
3543 mmc_blk_remove_req(part_md);
3544 }
3545}
3546
3547static int mmc_add_disk(struct mmc_blk_data *md)
3548{
3549 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01003550 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003551
Dan Williams307d8e62016-06-20 10:40:44 -07003552 device_add_disk(md->parent, md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003553 md->force_ro.show = force_ro_show;
3554 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05303555 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003556 md->force_ro.attr.name = "force_ro";
3557 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
3558 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
3559 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01003560 goto force_ro_fail;
Mark Salyzyn6904e432016-01-28 11:12:25 -08003561#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3562 atomic_set(&md->queue.max_write_speed, max_write_speed);
3563 ret = device_create_file(disk_to_dev(md->disk),
3564 &dev_attr_max_write_speed);
3565 if (ret)
3566 goto max_write_speed_fail;
3567 atomic_set(&md->queue.max_read_speed, max_read_speed);
3568 ret = device_create_file(disk_to_dev(md->disk),
3569 &dev_attr_max_read_speed);
3570 if (ret)
3571 goto max_read_speed_fail;
3572 atomic_set(&md->queue.cache_size, cache_size);
3573 atomic_long_set(&md->queue.cache_used, 0);
3574 md->queue.cache_jiffies = jiffies;
3575 ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
3576 if (ret)
3577 goto cache_size_fail;
3578#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01003579
3580 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
3581 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04003582 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01003583
3584 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
3585 mode = S_IRUGO;
3586 else
3587 mode = S_IRUGO | S_IWUSR;
3588
3589 md->power_ro_lock.show = power_ro_lock_show;
3590 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01003591 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003592 md->power_ro_lock.attr.mode = mode;
3593 md->power_ro_lock.attr.name =
3594 "ro_lock_until_next_power_on";
3595 ret = device_create_file(disk_to_dev(md->disk),
3596 &md->power_ro_lock);
3597 if (ret)
3598 goto power_ro_lock_fail;
3599 }
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003600
3601 md->num_wr_reqs_to_start_packing.show =
3602 num_wr_reqs_to_start_packing_show;
3603 md->num_wr_reqs_to_start_packing.store =
3604 num_wr_reqs_to_start_packing_store;
3605 sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
3606 md->num_wr_reqs_to_start_packing.attr.name =
3607 "num_wr_reqs_to_start_packing";
3608 md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
3609 ret = device_create_file(disk_to_dev(md->disk),
3610 &md->num_wr_reqs_to_start_packing);
3611 if (ret)
Maya Erez17022402014-12-04 00:15:42 +02003612 goto num_wr_reqs_to_start_packing_fail;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003613
Maya Erez5a8dae12014-12-04 15:13:59 +02003614 md->no_pack_for_random.show = no_pack_for_random_show;
3615 md->no_pack_for_random.store = no_pack_for_random_store;
3616 sysfs_attr_init(&md->no_pack_for_random.attr);
3617 md->no_pack_for_random.attr.name = "no_pack_for_random";
3618 md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
3619 ret = device_create_file(disk_to_dev(md->disk),
3620 &md->no_pack_for_random);
3621 if (ret)
3622 goto no_pack_for_random_fails;
3623
Johan Rudholmadd710e2011-12-02 08:51:06 +01003624 return ret;
3625
Maya Erez5a8dae12014-12-04 15:13:59 +02003626no_pack_for_random_fails:
3627 device_remove_file(disk_to_dev(md->disk),
3628 &md->num_wr_reqs_to_start_packing);
Maya Erez17022402014-12-04 00:15:42 +02003629num_wr_reqs_to_start_packing_fail:
3630 device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003631power_ro_lock_fail:
Mark Salyzyn6904e432016-01-28 11:12:25 -08003632#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3633 device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
3634cache_size_fail:
3635 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
3636max_read_speed_fail:
3637 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
3638max_write_speed_fail:
3639#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01003640 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
3641force_ro_fail:
3642 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003643
3644 return ret;
3645}
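/*
 * Illustrative sketch (not part of the driver): the unwind pattern used
 * by mmc_add_disk() above -- each error label undoes, in reverse order,
 * exactly the steps that succeeded before the failure. Function names
 * are placeholders.
 */
#if 0
	ret = create_a(dev);
	if (ret)
		goto a_fail;
	ret = create_b(dev);
	if (ret)
		goto b_fail;
	return 0;
b_fail:
	destroy_a(dev);
a_fail:
	return ret;
#endif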
3646
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003647static const struct mmc_fixup blk_fixups[] =
3648{
Chris Ballc59d4472011-11-11 22:01:43 -05003649 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
3650 MMC_QUIRK_INAND_CMD38),
3651 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
3652 MMC_QUIRK_INAND_CMD38),
3653 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
3654 MMC_QUIRK_INAND_CMD38),
3655 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
3656 MMC_QUIRK_INAND_CMD38),
3657 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3658 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003659
3660 /*
3661 * Some MMC cards experience performance degradation with CMD23
3662 * instead of CMD12-bounded multiblock transfers. For now we'll
3663	 * blacklist what's bad...
3664 * - Certain Toshiba cards.
3665 *
3666 * N.B. This doesn't affect SD cards.
3667 */
Yangbo Lu7d70d472015-07-10 11:44:03 +08003668 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
3669 MMC_QUIRK_BLK_NO_CMD23),
3670 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
3671 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003672 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003673 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003674 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003675 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003676 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003677 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003678
3679 /*
Matt Gumbel32ecd322016-05-20 10:33:46 +03003680 * Some MMC cards need longer data read timeout than indicated in CSD.
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003681 */
Chris Ballc59d4472011-11-11 22:01:43 -05003682 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003683 MMC_QUIRK_LONG_READ_TIME),
Matt Gumbel32ecd322016-05-20 10:33:46 +03003684 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3685 MMC_QUIRK_LONG_READ_TIME),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003686
Ian Chen3550ccd2012-08-29 15:05:36 +09003687 /*
Guoping Yu3c984a92014-08-06 12:44:55 +08003688 * Some Samsung MMC cards need longer data read timeout than
3689 * indicated in CSD.
3690 */
3691 MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
3692 MMC_QUIRK_LONG_READ_TIME),
3693
3694 /*
Ian Chen3550ccd2012-08-29 15:05:36 +09003695 * On these Samsung MoviNAND parts, performing secure erase or
3696 * secure trim can result in unrecoverable corruption due to a
3697 * firmware bug.
3698 */
3699 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3700 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3701 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3702 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3703 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3704 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3705 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3706 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3707 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3708 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3709 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3710 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3711 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3712 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3713 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3714 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3715
Shawn Linb5b4ff02015-08-12 13:08:32 +08003716 /*
3717	 * On some Kingston eMMCs, performing trim can occasionally
3718	 * result in unrecoverable data corruption due to a firmware bug.
3719 */
3720 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
3721 MMC_QUIRK_TRIM_BROKEN),
3722 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
3723 MMC_QUIRK_TRIM_BROKEN),
3724
Pratibhasagar V8d664e32014-12-03 18:26:42 +02003725 /* Some INAND MCP devices advertise incorrect timeout values */
3726 MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
3727 MMC_QUIRK_INAND_DATA_TIMEOUT),
3728
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003729 END_FIXUP
3730};
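/*
 * Illustrative sketch (not part of the driver): the shape of an entry
 * in blk_fixups[] above. The CID name "XYZ123" is made up; a real
 * entry matches a card by CID name / manufacturer / OEM id and applies
 * the given quirk flag when the card is probed.
 */
#if 0
	MMC_FIXUP("XYZ123", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
#endif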
3731
Ulf Hansson96541ba2015-04-14 13:06:12 +02003732static int mmc_blk_probe(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003734 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003735 char cap_str[10];
3736
Pierre Ossman912490d2005-05-21 10:27:02 +01003737 /*
3738 * Check that the card supports the command class(es) we need.
3739 */
3740 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 return -ENODEV;
3742
Lukas Czerner5204d002014-06-18 13:18:07 +02003743 mmc_fixup_device(card, blk_fixups);
3744
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 md = mmc_blk_alloc(card);
3746 if (IS_ERR(md))
3747 return PTR_ERR(md);
3748
James Bottomleyb9f28d82015-03-05 18:47:01 -08003749 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003750 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303751 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003753 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754
Andrei Warkentin371a6892011-04-11 18:10:25 -05003755 if (mmc_blk_alloc_parts(card, md))
3756 goto out;
3757
Ulf Hansson96541ba2015-04-14 13:06:12 +02003758 dev_set_drvdata(&card->dev, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003759
Andrei Warkentin371a6892011-04-11 18:10:25 -05003760 if (mmc_add_disk(md))
3761 goto out;
3762
3763 list_for_each_entry(part_md, &md->part, part) {
3764 if (mmc_add_disk(part_md))
3765 goto out;
3766 }
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003767
3768 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
3769 pm_runtime_use_autosuspend(&card->dev);
3770
3771 /*
3772 * Don't enable runtime PM for SD-combo cards here. Leave that
3773 * decision to be taken during the SDIO init sequence instead.
3774 */
3775 if (card->type != MMC_TYPE_SD_COMBO) {
3776 pm_runtime_set_active(&card->dev);
3777 pm_runtime_enable(&card->dev);
3778 }
3779
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 return 0;
3781
3782 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05003783 mmc_blk_remove_parts(card, md);
3784 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01003785 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786}
3787
Ulf Hansson96541ba2015-04-14 13:06:12 +02003788static void mmc_blk_remove(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003790 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Andrei Warkentin371a6892011-04-11 18:10:25 -05003792 mmc_blk_remove_parts(card, md);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003793 pm_runtime_get_sync(&card->dev);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03003794 mmc_claim_host(card->host);
3795 mmc_blk_part_switch(card, md);
3796 mmc_release_host(card->host);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003797 if (card->type != MMC_TYPE_SD_COMBO)
3798 pm_runtime_disable(&card->dev);
3799 pm_runtime_put_noidle(&card->dev);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003800 mmc_blk_remove_req(md);
Ulf Hansson96541ba2015-04-14 13:06:12 +02003801 dev_set_drvdata(&card->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802}
3803
Ulf Hansson96541ba2015-04-14 13:06:12 +02003804static int _mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003806 struct mmc_blk_data *part_md;
Ulf Hansson96541ba2015-04-14 13:06:12 +02003807 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303808 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809
3810 if (md) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05303811 rc = mmc_queue_suspend(&md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303812 if (rc)
3813 goto out;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003814 list_for_each_entry(part_md, &md->part, part) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05303815 rc = mmc_queue_suspend(&part_md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303816 if (rc)
3817 goto out_resume;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 }
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303820 goto out;
3821
3822 out_resume:
3823 mmc_queue_resume(&md->queue);
3824 list_for_each_entry(part_md, &md->part, part) {
3825 mmc_queue_resume(&part_md->queue);
3826 }
3827 out:
3828 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829}
3830
Ulf Hansson96541ba2015-04-14 13:06:12 +02003831static void mmc_blk_shutdown(struct mmc_card *card)
Ulf Hansson76287742013-06-10 17:03:40 +02003832{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003833 _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02003834}
3835
Ulf Hansson0967edc2014-10-06 11:29:42 +02003836#ifdef CONFIG_PM_SLEEP
3837static int mmc_blk_suspend(struct device *dev)
Ulf Hansson76287742013-06-10 17:03:40 +02003838{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003839 struct mmc_card *card = mmc_dev_to_card(dev);
3840
3841 return _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02003842}
3843
Ulf Hansson0967edc2014-10-06 11:29:42 +02003844static int mmc_blk_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003846 struct mmc_blk_data *part_md;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02003847 struct mmc_blk_data *md = dev_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848
3849 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05003850 /*
3851 * Resume involves the card going into idle state,
3852 * so current partition is always the main one.
3853 */
3854 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003856 list_for_each_entry(part_md, &md->part, part) {
3857 mmc_queue_resume(&part_md->queue);
3858 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 }
3860 return 0;
3861}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862#endif
3863
Ulf Hansson0967edc2014-10-06 11:29:42 +02003864static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
3865
Ulf Hansson96541ba2015-04-14 13:06:12 +02003866static struct mmc_driver mmc_driver = {
3867 .drv = {
3868 .name = "mmcblk",
3869 .pm = &mmc_blk_pm_ops,
3870 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 .probe = mmc_blk_probe,
3872 .remove = mmc_blk_remove,
Ulf Hansson76287742013-06-10 17:03:40 +02003873 .shutdown = mmc_blk_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874};
3875
3876static int __init mmc_blk_init(void)
3877{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003878 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003880 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3881 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3882
Ben Hutchingsa26eba62014-11-06 03:35:09 +00003883 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003884
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003885 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3886 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003889 res = mmc_register_driver(&mmc_driver);
3890 if (res)
3891 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003893 return 0;
3894 out2:
3895 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 out:
3897 return res;
3898}
3899
3900static void __exit mmc_blk_exit(void)
3901{
3902 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003903 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904}
3905
3906module_init(mmc_blk_init);
3907module_exit(mmc_blk_exit);
3908
3909MODULE_LICENSE("GPL");
3910MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3911