Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
Pierre Ossman979ce722008-06-29 12:19:47 +02005 * Copyright 2005-2008 Pierre Ossman
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/kernel.h>
25#include <linux/fs.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090026#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/errno.h>
28#include <linux/hdreg.h>
29#include <linux/kdev_t.h>
30#include <linux/blkdev.h>
Arjan van de Vena621aae2006-01-12 18:43:35 +000031#include <linux/mutex.h>
Pierre Ossmanec5a19d2006-10-06 00:44:03 -070032#include <linux/scatterlist.h>
Sujit Reddy Thumma55291992014-12-09 20:40:16 +020033#include <linux/bitops.h>
Pierre Ossmana7bbb572008-09-06 10:57:57 +020034#include <linux/string_helpers.h>
John Calixtocb87ea22011-04-26 18:56:29 -040035#include <linux/delay.h>
36#include <linux/capability.h>
37#include <linux/compat.h>
Ulf Hanssone94cfef2013-05-02 14:02:38 +020038#include <linux/pm_runtime.h>
Ulf Hanssonb10fa992016-04-07 14:36:46 +020039#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
John Calixtocb87ea22011-04-26 18:56:29 -040041#include <linux/mmc/ioctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/mmc/card.h>
Pierre Ossman385e32272006-06-18 14:34:37 +020043#include <linux/mmc/host.h>
Pierre Ossmanda7fbe52006-12-24 22:46:55 +010044#include <linux/mmc/mmc.h>
45#include <linux/mmc/sd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <asm/uaccess.h>
48
Pierre Ossman98ac2162006-12-23 20:03:02 +010049#include "queue.h"
Baoyou Xie48ab0862016-09-30 09:37:38 +080050#include "block.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070051
Andy Whitcroft6b0b6282009-02-23 12:38:41 +000052MODULE_ALIAS("mmc:block");
Olof Johansson5e71b7a2010-09-17 21:19:57 -040053#ifdef MODULE_PARAM_PREFIX
54#undef MODULE_PARAM_PREFIX
55#endif
56#define MODULE_PARAM_PREFIX "mmcblk."
David Woodhouse1dff3142007-11-21 18:45:12 +010057
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -050058#define INAND_CMD38_ARG_EXT_CSD 113
59#define INAND_CMD38_ARG_ERASE 0x00
60#define INAND_CMD38_ARG_TRIM 0x01
61#define INAND_CMD38_ARG_SECERASE 0x80
62#define INAND_CMD38_ARG_SECTRIM1 0x81
63#define INAND_CMD38_ARG_SECTRIM2 0x88
Subhash Jadavani2fbab612014-12-04 15:16:17 +020064#define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */
Maya Erez775a9362013-04-18 15:41:55 +030065#define MMC_SANITIZE_REQ_TIMEOUT 240000
66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -050067
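/*
 * True for WRITE requests that carry REQ_FUA; such requests are the
 * candidates for an eMMC reliable write.
 */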
Luca Porziod3df0462015-11-06 15:12:26 +000068#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
Seungwon Jeonce39f9d2013-02-06 17:02:46 +090069 (rq_data_dir(req) == WRITE))
70#define PACKED_CMD_VER 0x01
71#define PACKED_CMD_WR 0x02
Lee Susman841fd132013-04-23 17:59:26 +030072#define PACKED_TRIGGER_MAX_ELEMENTS 5000
Seungwon Jeonce39f9d2013-02-06 17:02:46 +090073
Tatyana Brokhman08238ce2012-10-07 10:33:13 +020074#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
75 do { \
76 if (stats->enabled) \
77 stats->pack_stop_reason[reason]++; \
78 } while (0)
79
Lee Susman841fd132013-04-23 17:59:26 +030080#define PCKD_TRGR_INIT_MEAN_POTEN 17
81#define PCKD_TRGR_POTEN_LOWER_BOUND 5
82#define PCKD_TRGR_URGENT_PENALTY 2
83#define PCKD_TRGR_LOWER_BOUND 5
84#define PCKD_TRGR_PRECISION_MULTIPLIER 100
85
Arnd Bergmann2a48fc02010-06-02 14:28:52 +020086static DEFINE_MUTEX(block_mutex);
Olof Johansson5e71b7a2010-09-17 21:19:57 -040087
88/*
 89 * The defaults come from config options but can be overridden by module
90 * or bootarg options.
91 */
92static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
93
94/*
95 * We've only got one major, so number of mmcblk devices is
Ben Hutchingsa26eba62014-11-06 03:35:09 +000096 * limited to (1 << 20) / number of minors per device. It is also
Ulf Hanssonb10fa992016-04-07 14:36:46 +020097 * limited by the MAX_DEVICES below.
Olof Johansson5e71b7a2010-09-17 21:19:57 -040098 */
99static int max_devices;
100
Ben Hutchingsa26eba62014-11-06 03:35:09 +0000101#define MAX_DEVICES 256
102
Ulf Hanssonb10fa992016-04-07 14:36:46 +0200103static DEFINE_IDA(mmc_blk_ida);
104static DEFINE_SPINLOCK(mmc_blk_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106/*
107 * There is one mmc_blk_data per slot.
108 */
109struct mmc_blk_data {
110 spinlock_t lock;
Dan Williams307d8e62016-06-20 10:40:44 -0700111 struct device *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112 struct gendisk *disk;
113 struct mmc_queue queue;
Andrei Warkentin371a6892011-04-11 18:10:25 -0500114 struct list_head part;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
Andrei Warkentind0c97cf2011-05-23 15:06:36 -0500116 unsigned int flags;
117#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
118#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
Seungwon Jeonce39f9d2013-02-06 17:02:46 +0900119#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
Andrei Warkentind0c97cf2011-05-23 15:06:36 -0500120
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 unsigned int usage;
Russell Kinga6f6c962006-01-03 22:38:44 +0000122 unsigned int read_only;
Andrei Warkentin371a6892011-04-11 18:10:25 -0500123 unsigned int part_type;
Adrian Hunter67716322011-08-29 16:42:15 +0300124 unsigned int reset_done;
125#define MMC_BLK_READ BIT(0)
126#define MMC_BLK_WRITE BIT(1)
127#define MMC_BLK_DISCARD BIT(2)
128#define MMC_BLK_SECDISCARD BIT(3)
Andrei Warkentin371a6892011-04-11 18:10:25 -0500129
130 /*
131 * Only set in main mmc_blk_data associated
Ulf Hanssonfc95e302014-10-06 14:34:09 +0200132 * with mmc_card with dev_set_drvdata, and keeps
Andrei Warkentin371a6892011-04-11 18:10:25 -0500133 * track of the current selected device partition.
134 */
135 unsigned int part_curr;
136 struct device_attribute force_ro;
Johan Rudholmadd710e2011-12-02 08:51:06 +0100137 struct device_attribute power_ro_lock;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200138 struct device_attribute num_wr_reqs_to_start_packing;
Maya Erez5a8dae12014-12-04 15:13:59 +0200139 struct device_attribute no_pack_for_random;
Johan Rudholmadd710e2011-12-02 08:51:06 +0100140 int area_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141};
142
Arjan van de Vena621aae2006-01-12 18:43:35 +0000143static DEFINE_MUTEX(open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
Seungwon Jeonce39f9d2013-02-06 17:02:46 +0900145enum {
146 MMC_PACKED_NR_IDX = -1,
147 MMC_PACKED_NR_ZERO,
148 MMC_PACKED_NR_SINGLE,
149};
150
Olof Johansson5e71b7a2010-09-17 21:19:57 -0400151module_param(perdev_minors, int, 0444);
 152MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
153
Loic Pallardy8d1e9772012-08-06 17:12:31 +0200154static inline int mmc_blk_part_switch(struct mmc_card *card,
155 struct mmc_blk_data *md);
156static int get_card_status(struct mmc_card *card, u32 *status, int retries);
157
Seungwon Jeonce39f9d2013-02-06 17:02:46 +0900158static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
159{
160 struct mmc_packed *packed = mqrq->packed;
161
Seungwon Jeonce39f9d2013-02-06 17:02:46 +0900162 mqrq->cmd_type = MMC_PACKED_NONE;
163 packed->nr_entries = MMC_PACKED_NR_ZERO;
164 packed->idx_failure = MMC_PACKED_NR_IDX;
165 packed->retries = 0;
166 packed->blocks = 0;
167}
168
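/*
 * Take a reference on the mmc_blk_data behind @disk. Returns NULL once the
 * usage count has already dropped to zero (device going away). Serialized
 * by open_lock, as is mmc_blk_put() below.
 */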
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
170{
171 struct mmc_blk_data *md;
172
Arjan van de Vena621aae2006-01-12 18:43:35 +0000173 mutex_lock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174 md = disk->private_data;
175 if (md && md->usage == 0)
176 md = NULL;
177 if (md)
178 md->usage++;
Arjan van de Vena621aae2006-01-12 18:43:35 +0000179 mutex_unlock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180
181 return md;
182}
183
Andrei Warkentin371a6892011-04-11 18:10:25 -0500184static inline int mmc_get_devidx(struct gendisk *disk)
185{
Colin Cross382c55f2015-10-22 10:00:41 -0700186 int devidx = disk->first_minor / perdev_minors;
Andrei Warkentin371a6892011-04-11 18:10:25 -0500187 return devidx;
188}
189
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190static void mmc_blk_put(struct mmc_blk_data *md)
191{
Arjan van de Vena621aae2006-01-12 18:43:35 +0000192 mutex_lock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 md->usage--;
194 if (md->usage == 0) {
Andrei Warkentin371a6892011-04-11 18:10:25 -0500195 int devidx = mmc_get_devidx(md->disk);
Adrian Hunter5fa83ce2010-01-08 14:43:00 -0800196 blk_cleanup_queue(md->queue.queue);
197
Ulf Hanssonb10fa992016-04-07 14:36:46 +0200198 spin_lock(&mmc_blk_lock);
199 ida_remove(&mmc_blk_ida, devidx);
200 spin_unlock(&mmc_blk_lock);
David Woodhouse1dff3142007-11-21 18:45:12 +0100201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 put_disk(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 kfree(md);
204 }
Arjan van de Vena621aae2006-01-12 18:43:35 +0000205 mutex_unlock(&open_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206}
207
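/*
 * sysfs "ro_lock" attribute: the show handler reports whether the boot area
 * is write-protected permanently (2), until the next power cycle (1), or not
 * at all (0); the store handler accepts "1" and enables power-on write
 * protection via EXT_CSD_BOOT_WP.
 */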
Johan Rudholmadd710e2011-12-02 08:51:06 +0100208static ssize_t power_ro_lock_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
210{
211 int ret;
212 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
Asutosh Das507d9a72014-12-09 10:15:53 +0200213 struct mmc_card *card;
Johan Rudholmadd710e2011-12-02 08:51:06 +0100214 int locked = 0;
215
Asutosh Das507d9a72014-12-09 10:15:53 +0200216 if (!md)
217 return -EINVAL;
218
219 card = md->queue.card;
Johan Rudholmadd710e2011-12-02 08:51:06 +0100220 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
221 locked = 2;
222 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
223 locked = 1;
224
225 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
226
Tomas Winkler9098f842015-07-16 15:50:45 +0200227 mmc_blk_put(md);
228
Johan Rudholmadd710e2011-12-02 08:51:06 +0100229 return ret;
230}
231
232static ssize_t power_ro_lock_store(struct device *dev,
233 struct device_attribute *attr, const char *buf, size_t count)
234{
235 int ret;
236 struct mmc_blk_data *md, *part_md;
237 struct mmc_card *card;
238 unsigned long set;
239
240 if (kstrtoul(buf, 0, &set))
241 return -EINVAL;
242
243 if (set != 1)
244 return count;
245
246 md = mmc_blk_get(dev_to_disk(dev));
Asutosh Das507d9a72014-12-09 10:15:53 +0200247 if (!md)
248 return -EINVAL;
Johan Rudholmadd710e2011-12-02 08:51:06 +0100249 card = md->queue.card;
250
Ulf Hanssone94cfef2013-05-02 14:02:38 +0200251 mmc_get_card(card);
Johan Rudholmadd710e2011-12-02 08:51:06 +0100252
253 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
254 card->ext_csd.boot_ro_lock |
255 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
256 card->ext_csd.part_time);
257 if (ret)
258 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
259 else
260 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
261
Ulf Hanssone94cfef2013-05-02 14:02:38 +0200262 mmc_put_card(card);
Johan Rudholmadd710e2011-12-02 08:51:06 +0100263
264 if (!ret) {
265 pr_info("%s: Locking boot partition ro until next power on\n",
266 md->disk->disk_name);
267 set_disk_ro(md->disk, 1);
268
269 list_for_each_entry(part_md, &md->part, part)
270 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
271 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
272 set_disk_ro(part_md->disk, 1);
273 }
274 }
275
276 mmc_blk_put(md);
277 return count;
278}
279
Andrei Warkentin371a6892011-04-11 18:10:25 -0500280static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
281 char *buf)
282{
283 int ret;
284 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
285
Asutosh Das507d9a72014-12-09 10:15:53 +0200286 if (!md)
287 return -EINVAL;
288
Baruch Siach0031a982014-09-22 10:12:51 +0300289 ret = snprintf(buf, PAGE_SIZE, "%d\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -0500290 get_disk_ro(dev_to_disk(dev)) ^
291 md->read_only);
292 mmc_blk_put(md);
293 return ret;
294}
295
296static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
297 const char *buf, size_t count)
298{
299 int ret;
300 char *end;
301 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
302 unsigned long set = simple_strtoul(buf, &end, 0);
Asutosh Das507d9a72014-12-09 10:15:53 +0200303
304 if (!md)
305 return -EINVAL;
306
Andrei Warkentin371a6892011-04-11 18:10:25 -0500307 if (end == buf) {
308 ret = -EINVAL;
309 goto out;
310 }
311
312 set_disk_ro(dev_to_disk(dev), set || md->read_only);
313 ret = count;
314out:
315 mmc_blk_put(md);
316 return ret;
317}
318
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200319static ssize_t
Maya Erez5a8dae12014-12-04 15:13:59 +0200320no_pack_for_random_show(struct device *dev,
321 struct device_attribute *attr, char *buf)
322{
323 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
324 int ret;
325
Asutosh Das507d9a72014-12-09 10:15:53 +0200326 if (!md)
327 return -EINVAL;
Maya Erez5a8dae12014-12-04 15:13:59 +0200328 ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);
329
330 mmc_blk_put(md);
331 return ret;
332}
333
334static ssize_t
335no_pack_for_random_store(struct device *dev,
336 struct device_attribute *attr,
337 const char *buf, size_t count)
338{
339 int value;
340 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
Asutosh Das507d9a72014-12-09 10:15:53 +0200341 struct mmc_card *card;
Maya Erez5a8dae12014-12-04 15:13:59 +0200342 int ret = count;
343
Asutosh Das507d9a72014-12-09 10:15:53 +0200344 if (!md)
345 return -EINVAL;
346
347 card = md->queue.card;
Maya Erez5a8dae12014-12-04 15:13:59 +0200348 if (!card) {
349 ret = -EINVAL;
350 goto exit;
351 }
352
353 sscanf(buf, "%d", &value);
354
355 if (value < 0) {
 356 pr_err("%s: value %d is not valid. old value remains = %d\n",
357 mmc_hostname(card->host), value,
358 md->queue.no_pack_for_random);
359 ret = -EINVAL;
360 goto exit;
361 }
362
363 md->queue.no_pack_for_random = (value > 0) ? true : false;
364
 365 pr_debug("%s: no_pack_for_random: new value = %d\n",
366 mmc_hostname(card->host),
367 md->queue.no_pack_for_random);
368
369exit:
370 mmc_blk_put(md);
371 return ret;
372}
373
374static ssize_t
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200375num_wr_reqs_to_start_packing_show(struct device *dev,
376 struct device_attribute *attr, char *buf)
377{
378 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
379 int num_wr_reqs_to_start_packing;
380 int ret;
381
Asutosh Das507d9a72014-12-09 10:15:53 +0200382 if (!md)
383 return -EINVAL;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200384 num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
385
386 ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
387
388 mmc_blk_put(md);
389 return ret;
390}
391
392static ssize_t
393num_wr_reqs_to_start_packing_store(struct device *dev,
394 struct device_attribute *attr,
395 const char *buf, size_t count)
396{
397 int value;
398 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
Asutosh Das507d9a72014-12-09 10:15:53 +0200399 struct mmc_card *card;
Yaniv Gardi42399822014-12-04 00:26:23 +0200400 int ret = count;
401
Asutosh Das507d9a72014-12-09 10:15:53 +0200402 if (!md)
403 return -EINVAL;
404
405 card = md->queue.card;
Yaniv Gardi42399822014-12-04 00:26:23 +0200406 if (!card) {
407 ret = -EINVAL;
408 goto exit;
409 }
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200410
411 sscanf(buf, "%d", &value);
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200412
Yaniv Gardi42399822014-12-04 00:26:23 +0200413 if (value >= 0) {
414 md->queue.num_wr_reqs_to_start_packing =
415 min_t(int, value, (int)card->ext_csd.max_packed_writes);
416
 417 pr_debug("%s: trigger to pack: new value = %d\n",
418 mmc_hostname(card->host),
419 md->queue.num_wr_reqs_to_start_packing);
420 } else {
 421 pr_err("%s: value %d is not valid. old value remains = %d\n",
422 mmc_hostname(card->host), value,
423 md->queue.num_wr_reqs_to_start_packing);
424 ret = -EINVAL;
425 }
426
427exit:
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200428 mmc_blk_put(md);
Yaniv Gardi42399822014-12-04 00:26:23 +0200429 return ret;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +0200430}
431
Mark Salyzyn6904e432016-01-28 11:12:25 -0800432#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
433
434static int max_read_speed, max_write_speed, cache_size = 4;
435
436module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
437MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
438module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
439MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
440module_param(cache_size, int, S_IRUSR | S_IRGRP);
441MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
442
443/*
444 * helper macros and expectations:
445 * size - unsigned long number of bytes
446 * jiffies - unsigned long HZ timestamp difference
447 * speed - unsigned KB/s transfer rate
448 */
449#define size_and_speed_to_jiffies(size, speed) \
450 ((size) * HZ / (speed) / 1024UL)
451#define jiffies_and_speed_to_size(jiffies, speed) \
452 (((speed) * (jiffies) * 1024UL) / HZ)
453#define jiffies_and_size_to_speed(jiffies, size) \
454 ((size) * HZ / (jiffies) / 1024UL)
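/*
 * Worked example (assuming HZ=100): size_and_speed_to_jiffies(1048576, 2048)
 * = 1048576 * 100 / 2048 / 1024 = 50 jiffies, i.e. a 1 MiB transfer capped
 * at 2048 KB/s is stretched to roughly 500 ms.
 */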
455
456/* Limits to report warning */
457/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
458#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
459#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
460
461#define speed_valid(speed) ((speed) > 0)
462
463static const char off[] = "off\n";
464
465static int max_speed_show(int speed, char *buf)
466{
467 if (speed)
468 return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
469 else
470 return scnprintf(buf, PAGE_SIZE, off);
471}
472
473static int max_speed_store(const char *buf, struct request_queue *q)
474{
475 unsigned int limit, set = 0;
476
477 if (!strncasecmp(off, buf, sizeof(off) - 2))
478 return set;
479 if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
480 return -EINVAL;
481 if (set == 0)
482 return set;
483 limit = MAX_SPEED(q);
484 if (set > limit)
485 pr_warn("max speed %u ineffective above %u\n", set, limit);
486 limit = MIN_SPEED(q);
487 if (set < limit)
488 pr_warn("max speed %u painful below %u\n", set, limit);
489 return set;
490}
491
492static ssize_t max_write_speed_show(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
496 int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
497
498 mmc_blk_put(md);
499 return ret;
500}
501
502static ssize_t max_write_speed_store(struct device *dev,
503 struct device_attribute *attr,
504 const char *buf, size_t count)
505{
506 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
507 int set = max_speed_store(buf, md->queue.queue);
508
509 if (set < 0) {
510 mmc_blk_put(md);
511 return set;
512 }
513
514 atomic_set(&md->queue.max_write_speed, set);
515 mmc_blk_put(md);
516 return count;
517}
518
519static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
520 max_write_speed_show, max_write_speed_store);
521
522static ssize_t max_read_speed_show(struct device *dev,
523 struct device_attribute *attr, char *buf)
524{
525 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
526 int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
527
528 mmc_blk_put(md);
529 return ret;
530}
531
532static ssize_t max_read_speed_store(struct device *dev,
533 struct device_attribute *attr,
534 const char *buf, size_t count)
535{
536 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
537 int set = max_speed_store(buf, md->queue.queue);
538
539 if (set < 0) {
540 mmc_blk_put(md);
541 return set;
542 }
543
544 atomic_set(&md->queue.max_read_speed, set);
545 mmc_blk_put(md);
546 return count;
547}
548
549static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
550 max_read_speed_show, max_read_speed_store);
551
552static ssize_t cache_size_show(struct device *dev,
553 struct device_attribute *attr, char *buf)
554{
555 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
556 struct mmc_queue *mq = &md->queue;
557 int cache_size = atomic_read(&mq->cache_size);
558 int ret;
559
560 if (!cache_size)
561 ret = scnprintf(buf, PAGE_SIZE, off);
562 else {
563 int speed = atomic_read(&mq->max_write_speed);
564
565 if (!speed_valid(speed))
566 ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
567 else { /* We accept race between cache_jiffies and cache_used */
568 unsigned long size = jiffies_and_speed_to_size(
569 jiffies - mq->cache_jiffies, speed);
570 long used = atomic_long_read(&mq->cache_used);
571
572 if (size >= used)
573 size = 0;
574 else
575 size = (used - size) * 100 / cache_size
576 / 1024UL / 1024UL;
577
578 ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
579 cache_size, size);
580 }
581 }
582
583 mmc_blk_put(md);
584 return ret;
585}
586
587static ssize_t cache_size_store(struct device *dev,
588 struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 struct mmc_blk_data *md;
592 unsigned int set = 0;
593
594 if (strncasecmp(off, buf, sizeof(off) - 2)
595 && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
596 return -EINVAL;
597
598 md = mmc_blk_get(dev_to_disk(dev));
599 atomic_set(&md->queue.cache_size, set);
600 mmc_blk_put(md);
601 return count;
602}
603
604static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
605 cache_size_show, cache_size_store);
606
607/* correct for write-back */
608static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
609{
610 long used = 0;
611 int speed = atomic_read(&mq->max_write_speed);
612
613 if (speed_valid(speed)) {
614 unsigned long size = jiffies_and_speed_to_size(
615 waitfor - mq->cache_jiffies, speed);
616 used = atomic_long_read(&mq->cache_used);
617
618 if (size >= used)
619 used = 0;
620 else
621 used -= size;
622 }
623
624 atomic_long_set(&mq->cache_used, used);
625 mq->cache_jiffies = waitfor;
626
627 return used;
628}
629
630static void mmc_blk_simulate_delay(
631 struct mmc_queue *mq,
632 struct request *req,
633 unsigned long waitfor)
634{
635 int max_speed;
636
637 if (!req)
638 return;
639
640 max_speed = (rq_data_dir(req) == READ)
641 ? atomic_read(&mq->max_read_speed)
642 : atomic_read(&mq->max_write_speed);
643 if (speed_valid(max_speed)) {
644 unsigned long bytes = blk_rq_bytes(req);
645
646 if (rq_data_dir(req) != READ) {
647 int cache_size = atomic_read(&mq->cache_size);
648
649 if (cache_size) {
650 unsigned long size = cache_size * 1024L * 1024L;
651 long used = mmc_blk_cache_used(mq, waitfor);
652
653 used += bytes;
654 atomic_long_set(&mq->cache_used, used);
655 bytes = 0;
656 if (used > size)
657 bytes = used - size;
658 }
659 }
660 waitfor += size_and_speed_to_jiffies(bytes, max_speed);
661 if (time_is_after_jiffies(waitfor)) {
662 long msecs = jiffies_to_msecs(waitfor - jiffies);
663
664 if (likely(msecs > 0))
665 msleep(msecs);
666 }
667 }
668}
669
670#else
671
672#define mmc_blk_simulate_delay(mq, req, waitfor)
673
674#endif
675
Al Viroa5a15612008-03-02 10:33:30 -0500676static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677{
Al Viroa5a15612008-03-02 10:33:30 -0500678 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679 int ret = -ENXIO;
680
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200681 mutex_lock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 if (md) {
683 if (md->usage == 2)
Al Viroa5a15612008-03-02 10:33:30 -0500684 check_disk_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 ret = 0;
Pierre Ossmana00fc092005-09-06 15:18:52 -0700686
Al Viroa5a15612008-03-02 10:33:30 -0500687 if ((mode & FMODE_WRITE) && md->read_only) {
Andrew Morton70bb0892008-09-05 14:00:24 -0700688 mmc_blk_put(md);
Pierre Ossmana00fc092005-09-06 15:18:52 -0700689 ret = -EROFS;
Andrew Morton70bb0892008-09-05 14:00:24 -0700690 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 }
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200692 mutex_unlock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 return ret;
695}
696
Al Virodb2a1442013-05-05 21:52:57 -0400697static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698{
Al Viroa5a15612008-03-02 10:33:30 -0500699 struct mmc_blk_data *md = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200701 mutex_lock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 mmc_blk_put(md);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +0200703 mutex_unlock(&block_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704}
705
706static int
Christoph Hellwiga885c8c2006-01-08 01:02:50 -0800707mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708{
Christoph Hellwiga885c8c2006-01-08 01:02:50 -0800709 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
710 geo->heads = 4;
711 geo->sectors = 16;
712 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713}
714
John Calixtocb87ea22011-04-26 18:56:29 -0400715struct mmc_blk_ioc_data {
716 struct mmc_ioc_cmd ic;
717 unsigned char *buf;
718 u64 buf_bytes;
719};
720
721static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
722 struct mmc_ioc_cmd __user *user)
723{
724 struct mmc_blk_ioc_data *idata;
725 int err;
726
yalin wang1ff89502015-11-12 19:27:11 +0800727 idata = kmalloc(sizeof(*idata), GFP_KERNEL);
John Calixtocb87ea22011-04-26 18:56:29 -0400728 if (!idata) {
729 err = -ENOMEM;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400730 goto out;
John Calixtocb87ea22011-04-26 18:56:29 -0400731 }
732
733 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
734 err = -EFAULT;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400735 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400736 }
737
738 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
739 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
740 err = -EOVERFLOW;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400741 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400742 }
743
Ville Viinikkabfe5b1b2016-07-08 18:27:02 +0300744 if (!idata->buf_bytes) {
745 idata->buf = NULL;
Johan Rudholm4d6144d2011-11-23 09:05:58 +0100746 return idata;
Ville Viinikkabfe5b1b2016-07-08 18:27:02 +0300747 }
Johan Rudholm4d6144d2011-11-23 09:05:58 +0100748
yalin wang1ff89502015-11-12 19:27:11 +0800749 idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
John Calixtocb87ea22011-04-26 18:56:29 -0400750 if (!idata->buf) {
751 err = -ENOMEM;
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400752 goto idata_err;
John Calixtocb87ea22011-04-26 18:56:29 -0400753 }
754
755 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
756 idata->ic.data_ptr, idata->buf_bytes)) {
757 err = -EFAULT;
758 goto copy_err;
759 }
760
761 return idata;
762
763copy_err:
764 kfree(idata->buf);
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400765idata_err:
John Calixtocb87ea22011-04-26 18:56:29 -0400766 kfree(idata);
Vladimir Motykaaea253e2011-05-11 00:00:43 -0400767out:
John Calixtocb87ea22011-04-26 18:56:29 -0400768 return ERR_PTR(err);
John Calixtocb87ea22011-04-26 18:56:29 -0400769}
770
Jon Huntera5f57742015-09-22 10:27:53 +0100771static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
772 struct mmc_blk_ioc_data *idata)
773{
774 struct mmc_ioc_cmd *ic = &idata->ic;
775
776 if (copy_to_user(&(ic_ptr->response), ic->response,
777 sizeof(ic->response)))
778 return -EFAULT;
779
780 if (!idata->ic.write_flag) {
781 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
782 idata->buf, idata->buf_bytes))
783 return -EFAULT;
784 }
785
786 return 0;
787}
788
Loic Pallardy8d1e9772012-08-06 17:12:31 +0200789static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
790 u32 retries_max)
791{
792 int err;
793 u32 retry_count = 0;
794
795 if (!status || !retries_max)
796 return -EINVAL;
797
798 do {
799 err = get_card_status(card, status, 5);
800 if (err)
801 break;
802
803 if (!R1_STATUS(*status) &&
804 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
805 break; /* RPMB programming operation complete */
806
807 /*
 808 * Reschedule to give the MMC device a chance to continue
809 * processing the previous command without being polled too
810 * frequently.
811 */
812 usleep_range(1000, 5000);
813 } while (++retry_count < retries_max);
814
815 if (retry_count == retries_max)
816 err = -EPERM;
817
818 return err;
819}
820
Maya Erez775a9362013-04-18 15:41:55 +0300821static int ioctl_do_sanitize(struct mmc_card *card)
822{
823 int err;
824
Ulf Hanssona2d10862013-12-16 14:37:26 +0100825 if (!mmc_can_sanitize(card)) {
Maya Erez775a9362013-04-18 15:41:55 +0300826 pr_warn("%s: %s - SANITIZE is not supported\n",
827 mmc_hostname(card->host), __func__);
828 err = -EOPNOTSUPP;
829 goto out;
830 }
831
832 pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
833 mmc_hostname(card->host), __func__);
834
835 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
836 EXT_CSD_SANITIZE_START, 1,
837 MMC_SANITIZE_REQ_TIMEOUT);
838
839 if (err)
840 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
841 mmc_hostname(card->host), __func__, err);
842
843 pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
844 __func__);
845out:
846 return err;
847}
848
Jon Huntera5f57742015-09-22 10:27:53 +0100849static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
850 struct mmc_blk_ioc_data *idata)
John Calixtocb87ea22011-04-26 18:56:29 -0400851{
John Calixtocb87ea22011-04-26 18:56:29 -0400852 struct mmc_command cmd = {0};
853 struct mmc_data data = {0};
Venkatraman Sad5fd972011-08-25 00:30:50 +0530854 struct mmc_request mrq = {NULL};
John Calixtocb87ea22011-04-26 18:56:29 -0400855 struct scatterlist sg;
856 int err;
857
Jon Huntera5f57742015-09-22 10:27:53 +0100858 if (!card || !md || !idata)
859 return -EINVAL;
John Calixtocb87ea22011-04-26 18:56:29 -0400860
Johan Rudholm4d6144d2011-11-23 09:05:58 +0100861 cmd.opcode = idata->ic.opcode;
862 cmd.arg = idata->ic.arg;
863 cmd.flags = idata->ic.flags;
864
865 if (idata->buf_bytes) {
866 data.sg = &sg;
867 data.sg_len = 1;
868 data.blksz = idata->ic.blksz;
869 data.blocks = idata->ic.blocks;
870
871 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
872
873 if (idata->ic.write_flag)
874 data.flags = MMC_DATA_WRITE;
875 else
876 data.flags = MMC_DATA_READ;
877
878 /* data.flags must already be set before doing this. */
879 mmc_set_data_timeout(&data, card);
880
881 /* Allow overriding the timeout_ns for empirical tuning. */
882 if (idata->ic.data_timeout_ns)
883 data.timeout_ns = idata->ic.data_timeout_ns;
884
885 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
886 /*
887 * Pretend this is a data transfer and rely on the
888 * host driver to compute timeout. When all host
889 * drivers support cmd.cmd_timeout for R1B, this
890 * can be changed to:
891 *
892 * mrq.data = NULL;
893 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
894 */
895 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
896 }
897
898 mrq.data = &data;
899 }
900
901 mrq.cmd = &cmd;
902
Loic Pallardy8d1e9772012-08-06 17:12:31 +0200903 err = mmc_blk_part_switch(card, md);
904 if (err)
Jon Huntera5f57742015-09-22 10:27:53 +0100905 return err;
Loic Pallardy8d1e9772012-08-06 17:12:31 +0200906
John Calixtocb87ea22011-04-26 18:56:29 -0400907 if (idata->ic.is_acmd) {
908 err = mmc_app_cmd(card->host, card);
909 if (err)
Jon Huntera5f57742015-09-22 10:27:53 +0100910 return err;
John Calixtocb87ea22011-04-26 18:56:29 -0400911 }
912
Yaniv Gardia82e4842013-06-05 14:13:08 +0300913 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
914 (cmd.opcode == MMC_SWITCH)) {
Maya Erez775a9362013-04-18 15:41:55 +0300915 err = ioctl_do_sanitize(card);
916
917 if (err)
 918 pr_err("%s: ioctl_do_sanitize() failed. err = %d\n",
919 __func__, err);
920
Jon Huntera5f57742015-09-22 10:27:53 +0100921 return err;
Maya Erez775a9362013-04-18 15:41:55 +0300922 }
923
John Calixtocb87ea22011-04-26 18:56:29 -0400924 mmc_wait_for_req(card->host, &mrq);
925
926 if (cmd.error) {
927 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
928 __func__, cmd.error);
Jon Huntera5f57742015-09-22 10:27:53 +0100929 return cmd.error;
John Calixtocb87ea22011-04-26 18:56:29 -0400930 }
931 if (data.error) {
932 dev_err(mmc_dev(card->host), "%s: data error %d\n",
933 __func__, data.error);
Jon Huntera5f57742015-09-22 10:27:53 +0100934 return data.error;
John Calixtocb87ea22011-04-26 18:56:29 -0400935 }
936
937 /*
938 * According to the SD specs, some commands require a delay after
939 * issuing the command.
940 */
941 if (idata->ic.postsleep_min_us)
942 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
943
Jon Huntera5f57742015-09-22 10:27:53 +0100944 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
John Calixtocb87ea22011-04-26 18:56:29 -0400945
Krishna Kondae6711632014-12-04 15:20:57 +0200946 return err;
947}
948
949struct mmc_blk_ioc_rpmb_data {
950 struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
951};
952
953static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
954 struct mmc_ioc_rpmb __user *user)
955{
956 struct mmc_blk_ioc_rpmb_data *idata;
957 int err, i;
958
959 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
960 if (!idata) {
961 err = -ENOMEM;
962 goto out;
963 }
964
965 for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
966 idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
967 if (IS_ERR(idata->data[i])) {
968 err = PTR_ERR(idata->data[i]);
969 goto copy_err;
970 }
971 }
972
973 return idata;
974
975copy_err:
976 while (--i >= 0) {
977 kfree(idata->data[i]->buf);
978 kfree(idata->data[i]);
979 }
980 kfree(idata);
981out:
982 return ERR_PTR(err);
983}
984
985static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
986 struct mmc_ioc_rpmb __user *ic_ptr)
987{
988 struct mmc_blk_ioc_rpmb_data *idata;
989 struct mmc_blk_data *md;
990 struct mmc_card *card;
991 struct mmc_command cmd = {0};
992 struct mmc_data data = {0};
993 struct mmc_request mrq = {NULL};
994 struct scatterlist sg;
995 int err = 0, i = 0;
996 u32 status = 0;
997
998 /* The caller must have CAP_SYS_RAWIO */
999 if (!capable(CAP_SYS_RAWIO))
1000 return -EPERM;
1001
1002 md = mmc_blk_get(bdev->bd_disk);
 1003 /* make sure this is an rpmb partition */
 1004 if (!md)
 1005 return -EINVAL;
 1006 if (!(md->area_type & MMC_BLK_DATA_AREA_RPMB)) {
 1007 err = -EINVAL;
 1008 goto cmd_done;
 1009 }
1008
1009 idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
1010 if (IS_ERR(idata)) {
1011 err = PTR_ERR(idata);
1012 goto cmd_done;
1013 }
1014
1015 card = md->queue.card;
1016 if (IS_ERR(card)) {
1017 err = PTR_ERR(card);
1018 goto idata_free;
1019 }
1020
1021 mmc_claim_host(card->host);
1022
1023 err = mmc_blk_part_switch(card, md);
1024 if (err)
1025 goto cmd_rel_host;
1026
1027 for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
1028 struct mmc_blk_ioc_data *curr_data;
1029 struct mmc_ioc_cmd *curr_cmd;
1030
1031 curr_data = idata->data[i];
1032 curr_cmd = &curr_data->ic;
1033 if (!curr_cmd->opcode)
1034 break;
1035
1036 cmd.opcode = curr_cmd->opcode;
1037 cmd.arg = curr_cmd->arg;
1038 cmd.flags = curr_cmd->flags;
1039
1040 if (curr_data->buf_bytes) {
1041 data.sg = &sg;
1042 data.sg_len = 1;
1043 data.blksz = curr_cmd->blksz;
1044 data.blocks = curr_cmd->blocks;
1045
1046 sg_init_one(data.sg, curr_data->buf,
1047 curr_data->buf_bytes);
1048
1049 if (curr_cmd->write_flag)
1050 data.flags = MMC_DATA_WRITE;
1051 else
1052 data.flags = MMC_DATA_READ;
1053
1054 /* data.flags must already be set before doing this. */
1055 mmc_set_data_timeout(&data, card);
1056
1057 /*
1058 * Allow overriding the timeout_ns for empirical tuning.
1059 */
1060 if (curr_cmd->data_timeout_ns)
1061 data.timeout_ns = curr_cmd->data_timeout_ns;
1062
1063 mrq.data = &data;
1064 }
1065
1066 mrq.cmd = &cmd;
1067
1068 err = mmc_set_blockcount(card, data.blocks,
1069 curr_cmd->write_flag & (1 << 31));
1070 if (err)
1071 goto cmd_rel_host;
1072
1073 mmc_wait_for_req(card->host, &mrq);
1074
1075 if (cmd.error) {
1076 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
1077 __func__, cmd.error);
1078 err = cmd.error;
1079 goto cmd_rel_host;
1080 }
1081 if (data.error) {
1082 dev_err(mmc_dev(card->host), "%s: data error %d\n",
1083 __func__, data.error);
1084 err = data.error;
1085 goto cmd_rel_host;
1086 }
1087
1088 if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
1089 sizeof(cmd.resp))) {
1090 err = -EFAULT;
1091 goto cmd_rel_host;
1092 }
1093
1094 if (!curr_cmd->write_flag) {
1095 if (copy_to_user((void __user *)(unsigned long)
1096 curr_cmd->data_ptr,
1097 curr_data->buf,
1098 curr_data->buf_bytes)) {
1099 err = -EFAULT;
1100 goto cmd_rel_host;
1101 }
1102 }
1103
Loic Pallardy8d1e9772012-08-06 17:12:31 +02001104 /*
1105 * Ensure RPMB command has completed by polling CMD13
1106 * "Send Status".
1107 */
1108 err = ioctl_rpmb_card_status_poll(card, &status, 5);
1109 if (err)
1110 dev_err(mmc_dev(card->host),
1111 "%s: Card Status=0x%08X, error %d\n",
1112 __func__, status, err);
1113 }
1114
Krishna Kondae6711632014-12-04 15:20:57 +02001115cmd_rel_host:
1116 mmc_put_card(card);
1117
1118idata_free:
1119 for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
1120 kfree(idata->data[i]->buf);
1121 kfree(idata->data[i]);
1122 }
1123 kfree(idata);
1124
1125cmd_done:
1126 mmc_blk_put(md);
Jon Huntera5f57742015-09-22 10:27:53 +01001127 return err;
1128}
1129
1130static int mmc_blk_ioctl_cmd(struct block_device *bdev,
1131 struct mmc_ioc_cmd __user *ic_ptr)
1132{
1133 struct mmc_blk_ioc_data *idata;
1134 struct mmc_blk_data *md;
1135 struct mmc_card *card;
Grant Grundlerb0934102015-09-23 18:30:33 -07001136 int err = 0, ioc_err = 0;
Jon Huntera5f57742015-09-22 10:27:53 +01001137
Shawn Lin83c742c2016-03-16 18:15:47 +08001138 /*
1139 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
1140 * whole block device, not on a partition. This prevents overspray
1141 * between sibling partitions.
1142 */
1143 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
1144 return -EPERM;
1145
Jon Huntera5f57742015-09-22 10:27:53 +01001146 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
Asutosh Dasbbefab32013-10-07 14:53:32 +05301147 if (IS_ERR_OR_NULL(idata))
Jon Huntera5f57742015-09-22 10:27:53 +01001148 return PTR_ERR(idata);
1149
1150 md = mmc_blk_get(bdev->bd_disk);
1151 if (!md) {
1152 err = -EINVAL;
1153 goto cmd_err;
1154 }
1155
1156 card = md->queue.card;
Asutosh Dasbbefab32013-10-07 14:53:32 +05301157 if (IS_ERR_OR_NULL(card)) {
Jon Huntera5f57742015-09-22 10:27:53 +01001158 err = PTR_ERR(card);
1159 goto cmd_done;
1160 }
1161
1162 mmc_get_card(card);
1163
Grant Grundlerb0934102015-09-23 18:30:33 -07001164 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
Jon Huntera5f57742015-09-22 10:27:53 +01001165
Adrian Hunter3c866562016-05-04 14:38:12 +03001166 /* Always switch back to main area after RPMB access */
1167 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
1168 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
1169
Ulf Hanssone94cfef2013-05-02 14:02:38 +02001170 mmc_put_card(card);
John Calixtocb87ea22011-04-26 18:56:29 -04001171
Grant Grundlerb0934102015-09-23 18:30:33 -07001172 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
Jon Huntera5f57742015-09-22 10:27:53 +01001173
John Calixtocb87ea22011-04-26 18:56:29 -04001174cmd_done:
1175 mmc_blk_put(md);
Philippe De Swert1c02f002012-04-11 23:31:45 +03001176cmd_err:
John Calixtocb87ea22011-04-26 18:56:29 -04001177 kfree(idata->buf);
1178 kfree(idata);
Grant Grundlerb0934102015-09-23 18:30:33 -07001179 return ioc_err ? ioc_err : err;
John Calixtocb87ea22011-04-26 18:56:29 -04001180}
1181
Jon Huntera5f57742015-09-22 10:27:53 +01001182static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
1183 struct mmc_ioc_multi_cmd __user *user)
1184{
1185 struct mmc_blk_ioc_data **idata = NULL;
1186 struct mmc_ioc_cmd __user *cmds = user->cmds;
1187 struct mmc_card *card;
1188 struct mmc_blk_data *md;
Grant Grundlerb0934102015-09-23 18:30:33 -07001189 int i, err = 0, ioc_err = 0;
Jon Huntera5f57742015-09-22 10:27:53 +01001190 __u64 num_of_cmds;
1191
Shawn Lin83c742c2016-03-16 18:15:47 +08001192 /*
1193 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
1194 * whole block device, not on a partition. This prevents overspray
1195 * between sibling partitions.
1196 */
1197 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
1198 return -EPERM;
1199
Jon Huntera5f57742015-09-22 10:27:53 +01001200 if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
1201 sizeof(num_of_cmds)))
1202 return -EFAULT;
1203
1204 if (num_of_cmds > MMC_IOC_MAX_CMDS)
1205 return -EINVAL;
1206
1207 idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
1208 if (!idata)
1209 return -ENOMEM;
1210
1211 for (i = 0; i < num_of_cmds; i++) {
1212 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
1213 if (IS_ERR(idata[i])) {
1214 err = PTR_ERR(idata[i]);
1215 num_of_cmds = i;
1216 goto cmd_err;
1217 }
1218 }
1219
1220 md = mmc_blk_get(bdev->bd_disk);
Olof Johanssonf00ab142016-02-09 09:34:30 -08001221 if (!md) {
1222 err = -EINVAL;
Jon Huntera5f57742015-09-22 10:27:53 +01001223 goto cmd_err;
Olof Johanssonf00ab142016-02-09 09:34:30 -08001224 }
Jon Huntera5f57742015-09-22 10:27:53 +01001225
1226 card = md->queue.card;
1227 if (IS_ERR(card)) {
1228 err = PTR_ERR(card);
1229 goto cmd_done;
1230 }
1231
1232 mmc_get_card(card);
1233
Grant Grundlerb0934102015-09-23 18:30:33 -07001234 for (i = 0; i < num_of_cmds && !ioc_err; i++)
1235 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
Jon Huntera5f57742015-09-22 10:27:53 +01001236
Adrian Hunter3c866562016-05-04 14:38:12 +03001237 /* Always switch back to main area after RPMB access */
1238 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
1239 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
1240
Jon Huntera5f57742015-09-22 10:27:53 +01001241 mmc_put_card(card);
1242
1243 /* copy to user if data and response */
Grant Grundlerb0934102015-09-23 18:30:33 -07001244 for (i = 0; i < num_of_cmds && !err; i++)
Jon Huntera5f57742015-09-22 10:27:53 +01001245 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
Jon Huntera5f57742015-09-22 10:27:53 +01001246
1247cmd_done:
1248 mmc_blk_put(md);
1249cmd_err:
1250 for (i = 0; i < num_of_cmds; i++) {
1251 kfree(idata[i]->buf);
1252 kfree(idata[i]);
1253 }
1254 kfree(idata);
Grant Grundlerb0934102015-09-23 18:30:33 -07001255 return ioc_err ? ioc_err : err;
Jon Huntera5f57742015-09-22 10:27:53 +01001256}
1257
John Calixtocb87ea22011-04-26 18:56:29 -04001258static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
1259 unsigned int cmd, unsigned long arg)
1260{
Jon Huntera5f57742015-09-22 10:27:53 +01001261 switch (cmd) {
1262 case MMC_IOC_CMD:
1263 return mmc_blk_ioctl_cmd(bdev,
1264 (struct mmc_ioc_cmd __user *)arg);
Krishna Kondae6711632014-12-04 15:20:57 +02001265 case MMC_IOC_RPMB_CMD:
1266 return mmc_blk_ioctl_rpmb_cmd(bdev,
1267 (struct mmc_ioc_rpmb __user *)arg);
Jon Huntera5f57742015-09-22 10:27:53 +01001268 case MMC_IOC_MULTI_CMD:
1269 return mmc_blk_ioctl_multi_cmd(bdev,
1270 (struct mmc_ioc_multi_cmd __user *)arg);
1271 default:
1272 return -EINVAL;
1273 }
John Calixtocb87ea22011-04-26 18:56:29 -04001274}
1275
1276#ifdef CONFIG_COMPAT
1277static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
1278 unsigned int cmd, unsigned long arg)
1279{
1280 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
1281}
1282#endif
1283
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001284static const struct block_device_operations mmc_bdops = {
Al Viroa5a15612008-03-02 10:33:30 -05001285 .open = mmc_blk_open,
1286 .release = mmc_blk_release,
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08001287 .getgeo = mmc_blk_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 .owner = THIS_MODULE,
John Calixtocb87ea22011-04-26 18:56:29 -04001289 .ioctl = mmc_blk_ioctl,
1290#ifdef CONFIG_COMPAT
1291 .compat_ioctl = mmc_blk_compat_ioctl,
1292#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293};
1294
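/*
 * Switch the card to the hardware partition (main, boot, RPMB or
 * general-purpose) backing @md by updating the access bits of EXT_CSD
 * PARTITION_CONFIG. A no-op when that partition is already selected;
 * re-tuning is paused while RPMB is selected.
 */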
Andrei Warkentin371a6892011-04-11 18:10:25 -05001295static inline int mmc_blk_part_switch(struct mmc_card *card,
1296 struct mmc_blk_data *md)
1297{
1298 int ret;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02001299 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001300
Oluwafemi Adeyemif952a472013-01-03 11:32:53 -08001301 if ((main_md->part_curr == md->part_type) &&
1302 (card->part_curr == md->part_type))
Andrei Warkentin371a6892011-04-11 18:10:25 -05001303 return 0;
1304
1305 if (mmc_card_mmc(card)) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001306 u8 part_config = card->ext_csd.part_config;
1307
Adrian Hunter57da0c02016-05-04 14:38:13 +03001308 if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1309 mmc_retune_pause(card->host);
1310
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001311 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1312 part_config |= md->part_type;
Andrei Warkentin371a6892011-04-11 18:10:25 -05001313
1314 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001315 EXT_CSD_PART_CONFIG, part_config,
Andrei Warkentin371a6892011-04-11 18:10:25 -05001316 card->ext_csd.part_time);
Adrian Hunter57da0c02016-05-04 14:38:13 +03001317 if (ret) {
1318 if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1319 mmc_retune_unpause(card->host);
Andrei Warkentin371a6892011-04-11 18:10:25 -05001320 return ret;
Adrian Hunter57da0c02016-05-04 14:38:13 +03001321 }
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03001322
1323 card->ext_csd.part_config = part_config;
Oluwafemi Adeyemif952a472013-01-03 11:32:53 -08001324 card->part_curr = md->part_type;
Adrian Hunter57da0c02016-05-04 14:38:13 +03001325
1326 if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
1327 mmc_retune_unpause(card->host);
Adrian Hunter67716322011-08-29 16:42:15 +03001328 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05001329
1330 main_md->part_curr = md->part_type;
1331 return 0;
1332}
1333
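/*
 * Issue ACMD22 (SEND_NUM_WR_BLKS) to ask an SD card how many blocks were
 * written successfully before an error; returns (u32)-1 on any failure.
 */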
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001334static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
1335{
1336 int err;
Ben Dooks051913d2009-06-08 23:33:57 +01001337 u32 result;
1338 __be32 *blocks;
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001339
Venkatraman Sad5fd972011-08-25 00:30:50 +05301340 struct mmc_request mrq = {NULL};
Chris Ball1278dba2011-04-13 23:40:30 -04001341 struct mmc_command cmd = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -04001342 struct mmc_data data = {0};
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001343
1344 struct scatterlist sg;
1345
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001346 cmd.opcode = MMC_APP_CMD;
1347 cmd.arg = card->rca << 16;
David Brownell7213d172007-08-08 09:10:23 -07001348 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001349
1350 err = mmc_wait_for_cmd(card->host, &cmd, 0);
David Brownell7213d172007-08-08 09:10:23 -07001351 if (err)
1352 return (u32)-1;
1353 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001354 return (u32)-1;
1355
1356 memset(&cmd, 0, sizeof(struct mmc_command));
1357
1358 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1359 cmd.arg = 0;
David Brownell7213d172007-08-08 09:10:23 -07001360 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001361
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001362 data.blksz = 4;
1363 data.blocks = 1;
1364 data.flags = MMC_DATA_READ;
1365 data.sg = &sg;
1366 data.sg_len = 1;
Subhash Jadavanid3804432012-06-13 17:10:43 +05301367 mmc_set_data_timeout(&data, card);
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001368
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001369 mrq.cmd = &cmd;
1370 mrq.data = &data;
1371
Ben Dooks051913d2009-06-08 23:33:57 +01001372 blocks = kmalloc(4, GFP_KERNEL);
1373 if (!blocks)
1374 return (u32)-1;
1375
1376 sg_init_one(&sg, blocks, 4);
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001377
1378 mmc_wait_for_req(card->host, &mrq);
1379
Ben Dooks051913d2009-06-08 23:33:57 +01001380 result = ntohl(*blocks);
1381 kfree(blocks);
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001382
Ben Dooks051913d2009-06-08 23:33:57 +01001383 if (cmd.error || data.error)
1384 result = (u32)-1;
1385
1386 return result;
Pierre Ossmanec5a19d2006-10-06 00:44:03 -07001387}
1388
Russell King - ARM Linux0a2d4042011-06-20 20:10:08 +01001389static int get_card_status(struct mmc_card *card, u32 *status, int retries)
Adrian Hunter504f1912008-10-16 12:55:25 +03001390{
Chris Ball1278dba2011-04-13 23:40:30 -04001391 struct mmc_command cmd = {0};
Adrian Hunter504f1912008-10-16 12:55:25 +03001392 int err;
1393
Adrian Hunter504f1912008-10-16 12:55:25 +03001394 cmd.opcode = MMC_SEND_STATUS;
1395 if (!mmc_host_is_spi(card->host))
1396 cmd.arg = card->rca << 16;
1397 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
Russell King - ARM Linux0a2d4042011-06-20 20:10:08 +01001398 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1399 if (err == 0)
1400 *status = cmd.resp[0];
1401 return err;
Adrian Hunter504f1912008-10-16 12:55:25 +03001402}
1403
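/*
 * Poll CMD13 (SEND_STATUS) until the card reports READY_FOR_DATA and has
 * left the programming state, or until @timeout_ms expires. R1_ERROR
 * responses are flagged through *gen_err; the loop exits early when the
 * host hardware handles busy detection itself.
 */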
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001404static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
Ulf Hansson95a91292014-01-29 13:11:27 +01001405 bool hw_busy_detect, struct request *req, int *gen_err)
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001406{
1407 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1408 int err = 0;
1409 u32 status;
1410
1411 do {
1412 err = get_card_status(card, &status, 5);
1413 if (err) {
1414 pr_err("%s: error %d requesting status\n",
1415 req->rq_disk->disk_name, err);
1416 return err;
1417 }
1418
1419 if (status & R1_ERROR) {
1420 pr_err("%s: %s: error sending status cmd, status %#x\n",
1421 req->rq_disk->disk_name, __func__, status);
1422 *gen_err = 1;
1423 }
1424
Ulf Hansson95a91292014-01-29 13:11:27 +01001425 /* We may rely on the host hw to handle busy detection.*/
1426 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
1427 hw_busy_detect)
1428 break;
1429
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001430 /*
1431 * Timeout if the device never becomes ready for data and never
1432 * leaves the program state.
1433 */
1434 if (time_after(jiffies, timeout)) {
1435 pr_err("%s: Card stuck in programming state! %s %s\n",
1436 mmc_hostname(card->host),
1437 req->rq_disk->disk_name, __func__);
1438 return -ETIMEDOUT;
1439 }
1440
1441 /*
1442 * Some cards mishandle the status bits,
1443 * so make sure to check both the busy
1444 * indication and the card state.
1445 */
1446 } while (!(status & R1_READY_FOR_DATA) ||
1447 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1448
1449 return err;
1450}
1451
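/*
 * Send CMD12 (STOP_TRANSMISSION), using an R1B response when the host's
 * max_busy_timeout allows it, and for writes wait for the card to return
 * to the transfer state before reporting success.
 */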
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001452static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
1453 struct request *req, int *gen_err, u32 *stop_status)
1454{
1455 struct mmc_host *host = card->host;
1456 struct mmc_command cmd = {0};
1457 int err;
1458 bool use_r1b_resp = rq_data_dir(req) == WRITE;
1459
1460 /*
1461 * Normally we use R1B responses for WRITE, but in cases where the host
1462 * has specified a max_busy_timeout we need to validate it. A failure
1463 * means we need to prevent the host from doing hw busy detection, which
1464 * is done by converting to a R1 response instead.
1465 */
1466 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
1467 use_r1b_resp = false;
1468
1469 cmd.opcode = MMC_STOP_TRANSMISSION;
1470 if (use_r1b_resp) {
1471 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1472 cmd.busy_timeout = timeout_ms;
1473 } else {
1474 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1475 }
1476
1477 err = mmc_wait_for_cmd(host, &cmd, 5);
1478 if (err)
1479 return err;
1480
1481 *stop_status = cmd.resp[0];
1482
1483 /* No need to check card status in case of READ. */
1484 if (rq_data_dir(req) == READ)
1485 return 0;
1486
1487 if (!mmc_host_is_spi(host) &&
1488 (*stop_status & R1_ERROR)) {
1489 pr_err("%s: %s: general error sending stop command, resp %#x\n",
1490 req->rq_disk->disk_name, __func__, *stop_status);
1491 *gen_err = 1;
1492 }
1493
1494 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
1495}
1496
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301497#define ERR_NOMEDIUM 3
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001498#define ERR_RETRY 2
1499#define ERR_ABORT 1
1500#define ERR_CONTINUE 0
1501
1502static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1503 bool status_valid, u32 status)
1504{
1505 switch (error) {
1506 case -EILSEQ:
1507 /* response crc error, retry the r/w cmd */
1508 pr_err("%s: %s sending %s command, card status %#x\n",
1509 req->rq_disk->disk_name, "response CRC error",
1510 name, status);
1511 return ERR_RETRY;
1512
1513 case -ETIMEDOUT:
1514 pr_err("%s: %s sending %s command, card status %#x\n",
1515 req->rq_disk->disk_name, "timed out", name, status);
1516
1517 /* If the status cmd initially failed, retry the r/w cmd */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301518 if (!status_valid) {
1519 pr_err("%s: status not valid, retrying timeout\n",
1520 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001521 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301522 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001523
1524 /*
1525 * If it was a r/w cmd crc error, or illegal command
 1526 * (e.g., issued in wrong state) then retry - we should
1527 * have corrected the state problem above.
1528 */
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301529 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1530 pr_err("%s: command error, retrying timeout\n",
1531 req->rq_disk->disk_name);
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001532 return ERR_RETRY;
Ken Sumrallcc4d04b2016-05-10 14:53:13 +05301533 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001534
1535 /* Otherwise abort the command */
1536 return ERR_ABORT;
1537
1538 default:
1539 /* We don't understand the error code the driver gave us */
1540 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1541 req->rq_disk->disk_name, error, status);
1542 return ERR_ABORT;
1543 }
1544}
1545
1546/*
1547 * Initial r/w and stop cmd error recovery.
1548 * We don't know whether the card received the r/w cmd or not, so try to
1549 * restore things back to a sane state. Essentially, we do this as follows:
1550 * - Obtain card status. If the first attempt to obtain card status fails,
1551 * the status word will reflect the failed status cmd, not the failed
1552 * r/w cmd. If we fail to obtain card status, it suggests we can no
1553 * longer communicate with the card.
1554 * - Check the card state. If the card received the cmd but there was a
1555 * transient problem with the response, it might still be in a data transfer
1556 * mode. Try to send it a stop command. If this fails, we can't recover.
1557 * - If the r/w cmd failed due to a response CRC error, it was probably
1558 * transient, so retry the cmd.
1559 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1560 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1561 * illegal cmd, retry.
1562 * Otherwise we don't understand what happened, so abort.
1563 */
1564static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001565 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001566{
1567 bool prev_cmd_status_valid = true;
1568 u32 status, stop_status = 0;
1569 int err, retry;
1570
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301571 if (mmc_card_removed(card))
1572 return ERR_NOMEDIUM;
1573
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001574 /*
1575 * Try to get card status which indicates both the card state
1576 * and why there was no response. If the first attempt fails,
1577 * we can't be sure the returned status is for the r/w command.
1578 */
1579 for (retry = 2; retry >= 0; retry--) {
1580 err = get_card_status(card, &status, 0);
1581 if (!err)
1582 break;
1583
Adrian Hunter6f398ad2015-05-07 13:10:23 +03001584 /* Re-tune if needed */
1585 mmc_retune_recheck(card->host);
1586
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001587 prev_cmd_status_valid = false;
1588 pr_err("%s: error %d sending status command, %sing\n",
1589 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1590 }
1591
1592 /* We couldn't get a response from the card. Give up. */
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301593 if (err) {
1594 /* Check if the card is removed */
1595 if (mmc_detect_card_removed(card->host))
1596 return ERR_NOMEDIUM;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001597 return ERR_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301598 }
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001599
Adrian Hunter67716322011-08-29 16:42:15 +03001600 /* Flag ECC errors */
1601 if ((status & R1_CARD_ECC_FAILED) ||
1602 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1603 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1604 *ecc_err = 1;
1605
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001606 /* Flag General errors */
1607 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1608 if ((status & R1_ERROR) ||
1609 (brq->stop.resp[0] & R1_ERROR)) {
1610 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1611 req->rq_disk->disk_name, __func__,
1612 brq->stop.resp[0], status);
1613 *gen_err = 1;
1614 }
1615
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001616 /*
1617 * Check the current card state. If it is in some data transfer
1618 * mode, tell it to stop (and hopefully transition back to TRAN.)
1619 */
1620 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1621 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001622 err = send_stop(card,
1623 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1624 req, gen_err, &stop_status);
1625 if (err) {
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001626 pr_err("%s: error %d sending stop command\n",
1627 req->rq_disk->disk_name, err);
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001628 /*
1629 * If the stop cmd also timed out, the card is probably
1630 * not present, so abort. Other errors are bad news too.
1631 */
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001632 return ERR_ABORT;
Ulf Hanssonbb5cba42014-01-14 21:31:35 +01001633 }
1634
Adrian Hunter67716322011-08-29 16:42:15 +03001635 if (stop_status & R1_CARD_ECC_FAILED)
1636 *ecc_err = 1;
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001637 }
1638
1639 /* Check for set block count errors */
1640 if (brq->sbc.error)
1641 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1642 prev_cmd_status_valid, status);
1643
1644 /* Check for r/w command errors */
1645 if (brq->cmd.error)
1646 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1647 prev_cmd_status_valid, status);
1648
Adrian Hunter67716322011-08-29 16:42:15 +03001649 /* Data errors */
1650 if (!brq->stop.error)
1651 return ERR_CONTINUE;
1652
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001653 /* Now for stop errors. These aren't fatal to the transfer. */
Johan Rudholm5e1344e2014-09-17 09:50:42 +02001654 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01001655 req->rq_disk->disk_name, brq->stop.error,
1656 brq->cmd.resp[0], status);
1657
1658 /*
1659	 * Substitute in our own stop status as this will give the error
1660 * state which happened during the execution of the r/w command.
1661 */
1662 if (stop_status) {
1663 brq->stop.resp[0] = stop_status;
1664 brq->stop.error = 0;
1665 }
1666 return ERR_CONTINUE;
1667}
1668
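/*
 * Reset the card and host controller after an error. The reset is done
 * at most once per error type until mmc_blk_reset_success() clears the
 * flag again, and the current partition is re-selected afterwards since
 * a reset leaves the card on its default (main) area.
 */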
Adrian Hunter67716322011-08-29 16:42:15 +03001669static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1670 int type)
1671{
1672 int err;
1673
1674 if (md->reset_done & type)
1675 return -EEXIST;
1676
1677 md->reset_done |= type;
1678 err = mmc_hw_reset(host);
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301679 if (err && err != -EOPNOTSUPP) {
1680 /* We failed to reset so we need to abort the request */
1681 pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
1682 __func__, err);
1683 return -ENODEV;
1684 }
1685
Adrian Hunter67716322011-08-29 16:42:15 +03001686 /* Ensure we switch back to the correct partition */
Sahitya Tummala943f51b2014-05-30 09:22:35 +05301687 if (host->card) {
Ulf Hanssonfc95e302014-10-06 14:34:09 +02001688 struct mmc_blk_data *main_md =
1689 dev_get_drvdata(&host->card->dev);
Adrian Hunter67716322011-08-29 16:42:15 +03001690 int part_err;
1691
1692 main_md->part_curr = main_md->part_type;
1693 part_err = mmc_blk_part_switch(host->card, md);
1694 if (part_err) {
1695 /*
1696 * We have failed to get back into the correct
1697 * partition, so we need to abort the whole request.
1698 */
1699 return -ENODEV;
1700 }
1701 }
1702 return err;
1703}
1704
1705static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1706{
1707 md->reset_done &= ~type;
1708}
1709
Chuanxiao Dong4e93b9a2014-08-12 12:01:30 +08001710int mmc_access_rpmb(struct mmc_queue *mq)
1711{
1712 struct mmc_blk_data *md = mq->data;
1713 /*
1714	 * If this is an RPMB partition access, return true
1715 */
1716 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1717 return true;
1718
1719 return false;
1720}
1721
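/*
 * Handle a DISCARD request: pick the best erase argument the card
 * supports (discard, trim or erase), honour the iNAND CMD38 quirk and
 * retry once after a reset if the erase fails with -EIO.
 */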
Adrian Hunterbd788c92010-08-11 14:17:47 -07001722static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1723{
1724 struct mmc_blk_data *md = mq->data;
1725 struct mmc_card *card = md->queue.card;
1726 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001727 int err = 0, type = MMC_BLK_DISCARD;
Adrian Hunterbd788c92010-08-11 14:17:47 -07001728
Adrian Hunterbd788c92010-08-11 14:17:47 -07001729 if (!mmc_can_erase(card)) {
1730 err = -EOPNOTSUPP;
1731 goto out;
1732 }
1733
1734 from = blk_rq_pos(req);
1735 nr = blk_rq_sectors(req);
1736
Kyungmin Parkb3bf9152011-10-18 09:34:04 +09001737 if (mmc_can_discard(card))
1738 arg = MMC_DISCARD_ARG;
1739 else if (mmc_can_trim(card))
Adrian Hunterbd788c92010-08-11 14:17:47 -07001740 arg = MMC_TRIM_ARG;
1741 else
1742 arg = MMC_ERASE_ARG;
Adrian Hunter67716322011-08-29 16:42:15 +03001743retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001744 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1745 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1746 INAND_CMD38_ARG_EXT_CSD,
1747 arg == MMC_TRIM_ARG ?
1748 INAND_CMD38_ARG_TRIM :
1749 INAND_CMD38_ARG_ERASE,
1750 0);
1751 if (err)
1752 goto out;
1753 }
Adrian Hunterbd788c92010-08-11 14:17:47 -07001754 err = mmc_erase(card, from, nr, arg);
1755out:
Adrian Hunter67716322011-08-29 16:42:15 +03001756 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1757 goto retry;
1758 if (!err)
1759 mmc_blk_reset_success(md, type);
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301760 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunterbd788c92010-08-11 14:17:47 -07001761
Adrian Hunterbd788c92010-08-11 14:17:47 -07001762 return err ? 0 : 1;
1763}
1764
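/*
 * Handle a secure erase/trim request. Secure trim is a two-phase
 * operation (SECURE_TRIM1 marks the range, SECURE_TRIM2 completes it),
 * with the iNAND CMD38 quirk applied before each phase and a single
 * reset-and-retry on -EIO.
 */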
Adrian Hunter49804542010-08-11 14:17:50 -07001765static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1766 struct request *req)
1767{
1768 struct mmc_blk_data *md = mq->data;
1769 struct mmc_card *card = md->queue.card;
Maya Erez775a9362013-04-18 15:41:55 +03001770 unsigned int from, nr, arg;
Adrian Hunter67716322011-08-29 16:42:15 +03001771 int err = 0, type = MMC_BLK_SECDISCARD;
Adrian Hunter49804542010-08-11 14:17:50 -07001772
Maya Erez775a9362013-04-18 15:41:55 +03001773 if (!(mmc_can_secure_erase_trim(card))) {
Adrian Hunter49804542010-08-11 14:17:50 -07001774 err = -EOPNOTSUPP;
1775 goto out;
1776 }
1777
1778 from = blk_rq_pos(req);
1779 nr = blk_rq_sectors(req);
1780
Maya Erez775a9362013-04-18 15:41:55 +03001781 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1782 arg = MMC_SECURE_TRIM1_ARG;
1783 else
1784 arg = MMC_SECURE_ERASE_ARG;
Adrian Hunter28302812012-04-05 14:45:48 +03001785
Adrian Hunter67716322011-08-29 16:42:15 +03001786retry:
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001787 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1788 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1789 INAND_CMD38_ARG_EXT_CSD,
1790 arg == MMC_SECURE_TRIM1_ARG ?
1791 INAND_CMD38_ARG_SECTRIM1 :
1792 INAND_CMD38_ARG_SECERASE,
1793 0);
1794 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03001795 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001796 }
Adrian Hunter28302812012-04-05 14:45:48 +03001797
Adrian Hunter49804542010-08-11 14:17:50 -07001798 err = mmc_erase(card, from, nr, arg);
Adrian Hunter28302812012-04-05 14:45:48 +03001799 if (err == -EIO)
1800 goto out_retry;
1801 if (err)
1802 goto out;
1803
1804 if (arg == MMC_SECURE_TRIM1_ARG) {
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001805 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1806 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1807 INAND_CMD38_ARG_EXT_CSD,
1808 INAND_CMD38_ARG_SECTRIM2,
1809 0);
1810 if (err)
Adrian Hunter28302812012-04-05 14:45:48 +03001811 goto out_retry;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001812 }
Adrian Hunter28302812012-04-05 14:45:48 +03001813
Adrian Hunter49804542010-08-11 14:17:50 -07001814 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
Adrian Hunter28302812012-04-05 14:45:48 +03001815 if (err == -EIO)
1816 goto out_retry;
1817 if (err)
1818 goto out;
Andrei Warkentin6a7a6b42011-04-12 15:06:53 -05001819 }
Adrian Hunter28302812012-04-05 14:45:48 +03001820
Adrian Hunter28302812012-04-05 14:45:48 +03001821out_retry:
1822 if (err && !mmc_blk_reset(md, card->host, type))
Adrian Hunter67716322011-08-29 16:42:15 +03001823 goto retry;
1824 if (!err)
1825 mmc_blk_reset_success(md, type);
Adrian Hunter28302812012-04-05 14:45:48 +03001826out:
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301827 blk_end_request(req, err, blk_rq_bytes(req));
Adrian Hunter49804542010-08-11 14:17:50 -07001828
Adrian Hunter49804542010-08-11 14:17:50 -07001829 return err ? 0 : 1;
1830}
1831
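/*
 * Handle a FLUSH request by flushing the card's internal cache. When
 * CONFIG_MMC_SIMULATE_MAX_SPEED is enabled, an artificial delay is
 * inserted to emulate the configured maximum write speed.
 */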
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001832static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1833{
1834 struct mmc_blk_data *md = mq->data;
Seungwon Jeon881d1c22011-10-14 14:03:21 +09001835 struct mmc_card *card = md->queue.card;
1836 int ret = 0;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001837
Seungwon Jeon881d1c22011-10-14 14:03:21 +09001838 ret = mmc_flush_cache(card);
1839 if (ret)
1840 ret = -EIO;
1841
Mark Salyzyn6904e432016-01-28 11:12:25 -08001842#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
1843 else if (atomic_read(&mq->cache_size)) {
1844 long used = mmc_blk_cache_used(mq, jiffies);
1845
1846 if (used) {
1847 int speed = atomic_read(&mq->max_write_speed);
1848
1849 if (speed_valid(speed)) {
1850 unsigned long msecs = jiffies_to_msecs(
1851 size_and_speed_to_jiffies(
1852 used, speed));
1853 if (msecs)
1854 msleep(msecs);
1855 }
1856 }
1857 }
1858#endif
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05301859 blk_end_request_all(req, ret);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001860
Seungwon Jeon881d1c22011-10-14 14:03:21 +09001861 return ret ? 0 : 1;
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001862}
1863
1864/*
1865 * Reformat current write as a reliable write, supporting
1866 * both legacy and the enhanced reliable write MMC cards.
1867 * In each transfer we'll handle only as much as a single
1868 * reliable write can handle, thus finish the request in
1869 * partial completions.
1870 */
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05001871static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1872 struct mmc_card *card,
1873 struct request *req)
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001874{
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001875 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1876 /* Legacy mode imposes restrictions on transfers. */
1877 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1878 brq->data.blocks = 1;
1879
1880 if (brq->data.blocks > card->ext_csd.rel_sectors)
1881 brq->data.blocks = card->ext_csd.rel_sectors;
1882 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1883 brq->data.blocks = 1;
1884 }
Andrei Warkentinf4c55222011-03-31 18:40:00 -05001885}
1886
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01001887#define CMD_ERRORS \
1888 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1889 R1_ADDRESS_ERROR | /* Misaligned address */ \
1890 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1891 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1892 R1_CC_ERROR | /* Card controller error */ \
1893 R1_ERROR) /* General/unknown error */
1894
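/*
 * Completion check for an asynchronous read/write request. Classifies
 * the outcome into an mmc_blk_status: run command error recovery first,
 * check the command response for error bits, wait for the card to leave
 * programming state on writes, and decide between success, partial
 * transfer, retry or abort.
 */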
Per Forlinee8a43a2011-07-01 18:55:33 +02001895static int mmc_blk_err_check(struct mmc_card *card,
1896 struct mmc_async_req *areq)
Per Forlind78d4a82011-07-01 18:55:30 +02001897{
Per Forlinee8a43a2011-07-01 18:55:33 +02001898 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1899 mmc_active);
1900 struct mmc_blk_request *brq = &mq_mrq->brq;
1901 struct request *req = mq_mrq->req;
Adrian Hunterb8360a42015-05-07 13:10:24 +03001902 int need_retune = card->host->need_retune;
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001903 int ecc_err = 0, gen_err = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02001904
1905 /*
1906 * sbc.error indicates a problem with the set block count
1907 * command. No data will have been transferred.
1908 *
1909 * cmd.error indicates a problem with the r/w command. No
1910 * data will have been transferred.
1911 *
1912 * stop.error indicates a problem with the stop command. Data
1913 * may have been transferred, or may still be transferring.
1914 */
Adrian Hunter67716322011-08-29 16:42:15 +03001915 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1916 brq->data.error) {
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001917 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
Per Forlind78d4a82011-07-01 18:55:30 +02001918 case ERR_RETRY:
1919 return MMC_BLK_RETRY;
1920 case ERR_ABORT:
1921 return MMC_BLK_ABORT;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05301922 case ERR_NOMEDIUM:
1923 return MMC_BLK_NOMEDIUM;
Per Forlind78d4a82011-07-01 18:55:30 +02001924 case ERR_CONTINUE:
1925 break;
1926 }
1927 }
1928
1929 /*
1930 * Check for errors relating to the execution of the
1931 * initial command - such as address errors. No data
1932 * has been transferred.
1933 */
1934 if (brq->cmd.resp[0] & CMD_ERRORS) {
1935 pr_err("%s: r/w command failed, status = %#x\n",
1936 req->rq_disk->disk_name, brq->cmd.resp[0]);
1937 return MMC_BLK_ABORT;
1938 }
1939
1940 /*
1941 * Everything else is either success, or a data error of some
1942 * kind. If it was a write, we may have transitioned to
1943 * program mode, which we have to wait for it to complete.
1944	 * program mode, and we have to wait for it to complete.
1945 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001946 int err;
Trey Ramsay8fee4762012-11-16 09:31:41 -06001947
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001948 /* Check stop command response */
1949 if (brq->stop.resp[0] & R1_ERROR) {
1950 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1951 req->rq_disk->disk_name, __func__,
1952 brq->stop.resp[0]);
1953 gen_err = 1;
1954 }
1955
Ulf Hansson95a91292014-01-29 13:11:27 +01001956 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1957 &gen_err);
Ulf Hanssonc49433f2014-01-29 11:01:55 +01001958 if (err)
1959 return MMC_BLK_CMD_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02001960 }
1961
KOBAYASHI Yoshitakec8760062013-07-07 07:35:45 +09001962 /* if general error occurs, retry the write operation. */
1963 if (gen_err) {
1964 pr_warn("%s: retrying write for general error\n",
1965 req->rq_disk->disk_name);
1966 return MMC_BLK_RETRY;
1967 }
1968
Per Forlind78d4a82011-07-01 18:55:30 +02001969 if (brq->data.error) {
Adrian Hunterb8360a42015-05-07 13:10:24 +03001970 if (need_retune && !brq->retune_retry_done) {
Russell King09faf612016-01-29 09:44:00 +00001971 pr_debug("%s: retrying because a re-tune was needed\n",
1972 req->rq_disk->disk_name);
Adrian Hunterb8360a42015-05-07 13:10:24 +03001973 brq->retune_retry_done = 1;
1974 return MMC_BLK_RETRY;
1975 }
Per Forlind78d4a82011-07-01 18:55:30 +02001976 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1977 req->rq_disk->disk_name, brq->data.error,
1978 (unsigned)blk_rq_pos(req),
1979 (unsigned)blk_rq_sectors(req),
1980 brq->cmd.resp[0], brq->stop.resp[0]);
1981
1982 if (rq_data_dir(req) == READ) {
Adrian Hunter67716322011-08-29 16:42:15 +03001983 if (ecc_err)
1984 return MMC_BLK_ECC_ERR;
Per Forlind78d4a82011-07-01 18:55:30 +02001985 return MMC_BLK_DATA_ERR;
1986 } else {
1987 return MMC_BLK_CMD_ERR;
1988 }
1989 }
1990
Adrian Hunter67716322011-08-29 16:42:15 +03001991 if (!brq->data.bytes_xfered)
1992 return MMC_BLK_RETRY;
Per Forlind78d4a82011-07-01 18:55:30 +02001993
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09001994 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1995 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1996 return MMC_BLK_PARTIAL;
1997 else
1998 return MMC_BLK_SUCCESS;
1999 }
2000
Adrian Hunter67716322011-08-29 16:42:15 +03002001 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
2002 return MMC_BLK_PARTIAL;
2003
2004 return MMC_BLK_SUCCESS;
Per Forlind78d4a82011-07-01 18:55:30 +02002005}
2006
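/*
 * Completion check for a packed write. On top of the normal error
 * check, read the card status and, if an exception event is reported,
 * fetch EXT_CSD to find the index of the failing entry so that only
 * part of the packed list needs to be retried.
 */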
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002007static int mmc_blk_packed_err_check(struct mmc_card *card,
2008 struct mmc_async_req *areq)
2009{
2010 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
2011 mmc_active);
2012 struct request *req = mq_rq->req;
2013 struct mmc_packed *packed = mq_rq->packed;
2014 int err, check, status;
2015 u8 *ext_csd;
2016
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002017 packed->retries--;
2018 check = mmc_blk_err_check(card, areq);
2019 err = get_card_status(card, &status, 0);
2020 if (err) {
2021 pr_err("%s: error %d sending status command\n",
2022 req->rq_disk->disk_name, err);
2023 return MMC_BLK_ABORT;
2024 }
2025
2026 if (status & R1_EXCEPTION_EVENT) {
Ulf Hansson86817ff2014-10-17 11:39:05 +02002027 err = mmc_get_ext_csd(card, &ext_csd);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002028 if (err) {
2029 pr_err("%s: error %d sending ext_csd\n",
2030 req->rq_disk->disk_name, err);
Ulf Hansson86817ff2014-10-17 11:39:05 +02002031 return MMC_BLK_ABORT;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002032 }
2033
2034 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
2035 EXT_CSD_PACKED_FAILURE) &&
2036 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2037 EXT_CSD_PACKED_GENERIC_ERROR)) {
2038 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
2039 EXT_CSD_PACKED_INDEXED_ERROR) {
2040 packed->idx_failure =
2041 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
2042 check = MMC_BLK_PARTIAL;
2043 }
2044 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
2045 "failure index: %d\n",
2046 req->rq_disk->disk_name, packed->nr_entries,
2047 packed->blocks, packed->idx_failure);
2048 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002049 kfree(ext_csd);
2050 }
2051
2052 return check;
2053}
2054
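/*
 * Prepare the mmc_blk_request for a single read/write request: choose
 * single or multi block commands, set up reliable write and data tag
 * handling, fill in CMD23 (set block count) when supported, and map the
 * scatterlist to match the request size.
 */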
Per Forlin54d49d72011-07-01 18:55:29 +02002055static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
2056 struct mmc_card *card,
2057 int disable_multi,
2058 struct mmc_queue *mq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059{
Per Forlin54d49d72011-07-01 18:55:29 +02002060 u32 readcmd, writecmd;
2061 struct mmc_blk_request *brq = &mqrq->brq;
2062 struct request *req = mqrq->req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 struct mmc_blk_data *md = mq->data;
Saugata Das42659002011-12-21 13:09:17 +05302064 bool do_data_tag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002066 /*
2067 * Reliable writes are used to implement Forced Unit Access and
Luca Porziod3df0462015-11-06 15:12:26 +00002068 * are supported only on MMCs.
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002069 */
Luca Porziod3df0462015-11-06 15:12:26 +00002070 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002071 (rq_data_dir(req) == WRITE) &&
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002072 (md->flags & MMC_BLK_REL_WR);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002073
Per Forlin54d49d72011-07-01 18:55:29 +02002074 memset(brq, 0, sizeof(struct mmc_blk_request));
2075 brq->mrq.cmd = &brq->cmd;
2076 brq->mrq.data = &brq->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
Per Forlin54d49d72011-07-01 18:55:29 +02002078 brq->cmd.arg = blk_rq_pos(req);
2079 if (!mmc_card_blockaddr(card))
2080 brq->cmd.arg <<= 9;
2081 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2082 brq->data.blksz = 512;
2083 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2084 brq->stop.arg = 0;
Per Forlin54d49d72011-07-01 18:55:29 +02002085 brq->data.blocks = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086
Asutosh Dasf0665412012-07-27 18:10:19 +05302087 brq->data.fault_injected = false;
Per Forlin54d49d72011-07-01 18:55:29 +02002088 /*
2089 * The block layer doesn't support all sector count
2090 * restrictions, so we need to be prepared for too big
2091 * requests.
2092 */
2093 if (brq->data.blocks > card->host->max_blk_count)
2094 brq->data.blocks = card->host->max_blk_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002096 if (brq->data.blocks > 1) {
2097 /*
2098 * After a read error, we redo the request one sector
2099 * at a time in order to accurately determine which
2100 * sectors can be read successfully.
2101 */
2102 if (disable_multi)
2103 brq->data.blocks = 1;
2104
Kuninori Morimoto2e47e842014-09-02 19:08:53 -07002105 /*
2106 * Some controllers have HW issues while operating
2107 * in multiple I/O mode
2108 */
2109 if (card->host->ops->multi_io_quirk)
2110 brq->data.blocks = card->host->ops->multi_io_quirk(card,
2111 (rq_data_dir(req) == READ) ?
2112 MMC_DATA_READ : MMC_DATA_WRITE,
2113 brq->data.blocks);
Paul Walmsley2bf22b32011-10-06 14:50:33 -06002114 }
Per Forlin54d49d72011-07-01 18:55:29 +02002115
2116 if (brq->data.blocks > 1 || do_rel_wr) {
2117 /* SPI multiblock writes terminate using a special
2118 * token, not a STOP_TRANSMISSION request.
Pierre Ossman548d2de2009-04-10 17:52:57 +02002119 */
Per Forlin54d49d72011-07-01 18:55:29 +02002120 if (!mmc_host_is_spi(card->host) ||
2121 rq_data_dir(req) == READ)
2122 brq->mrq.stop = &brq->stop;
2123 readcmd = MMC_READ_MULTIPLE_BLOCK;
2124 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
2125 } else {
2126 brq->mrq.stop = NULL;
2127 readcmd = MMC_READ_SINGLE_BLOCK;
2128 writecmd = MMC_WRITE_BLOCK;
2129 }
2130 if (rq_data_dir(req) == READ) {
2131 brq->cmd.opcode = readcmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002132 brq->data.flags = MMC_DATA_READ;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002133 if (brq->mrq.stop)
2134 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
2135 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002136 } else {
2137 brq->cmd.opcode = writecmd;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002138 brq->data.flags = MMC_DATA_WRITE;
Ulf Hanssonbcc3e172014-01-14 21:24:21 +01002139 if (brq->mrq.stop)
2140 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
2141 MMC_CMD_AC;
Per Forlin54d49d72011-07-01 18:55:29 +02002142 }
Pierre Ossman548d2de2009-04-10 17:52:57 +02002143
Per Forlin54d49d72011-07-01 18:55:29 +02002144 if (do_rel_wr)
2145 mmc_apply_rel_rw(brq, card, req);
Adrian Hunter6a79e392008-12-31 18:21:17 +01002146
Per Forlin54d49d72011-07-01 18:55:29 +02002147 /*
Saugata Das42659002011-12-21 13:09:17 +05302148	 * The data tag is used only when writing metadata, to speed
2149	 * up the write and any subsequent read of this metadata
2150 */
2151 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2152 (req->cmd_flags & REQ_META) &&
2153 (rq_data_dir(req) == WRITE) &&
2154 ((brq->data.blocks * brq->data.blksz) >=
2155 card->ext_csd.data_tag_unit_size);
2156
2157 /*
Per Forlin54d49d72011-07-01 18:55:29 +02002158 * Pre-defined multi-block transfers are preferable to
2159	 * open-ended ones (and necessary for reliable writes).
2160 * However, it is not sufficient to just send CMD23,
2161 * and avoid the final CMD12, as on an error condition
2162 * CMD12 (stop) needs to be sent anyway. This, coupled
2163 * with Auto-CMD23 enhancements provided by some
2164 * hosts, means that the complexity of dealing
2165 * with this is best left to the host. If CMD23 is
2166 * supported by card and host, we'll fill sbc in and let
2167 * the host deal with handling it correctly. This means
2168 * that for hosts that don't expose MMC_CAP_CMD23, no
2169 * change of behavior will be observed.
2170 *
2171 * N.B: Some MMC cards experience perf degradation.
2172 * We'll avoid using CMD23-bounded multiblock writes for
2173 * these, while retaining features like reliable writes.
2174 */
Saugata Das42659002011-12-21 13:09:17 +05302175 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
2176 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
2177 do_data_tag)) {
Per Forlin54d49d72011-07-01 18:55:29 +02002178 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2179 brq->sbc.arg = brq->data.blocks |
Saugata Das42659002011-12-21 13:09:17 +05302180 (do_rel_wr ? (1 << 31) : 0) |
2181 (do_data_tag ? (1 << 29) : 0);
Per Forlin54d49d72011-07-01 18:55:29 +02002182 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2183 brq->mrq.sbc = &brq->sbc;
2184 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002185
Per Forlin54d49d72011-07-01 18:55:29 +02002186 mmc_set_data_timeout(&brq->data, card);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05002187
Per Forlin54d49d72011-07-01 18:55:29 +02002188 brq->data.sg = mqrq->sg;
2189 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
Andrei Warkentinf4c55222011-03-31 18:40:00 -05002190
Per Forlin54d49d72011-07-01 18:55:29 +02002191 /*
2192 * Adjust the sg list so it is the same size as the
2193 * request.
2194 */
2195 if (brq->data.blocks != blk_rq_sectors(req)) {
2196 int i, data_size = brq->data.blocks << 9;
2197 struct scatterlist *sg;
Pierre Ossmanb146d262007-07-24 19:16:54 +02002198
Per Forlin54d49d72011-07-01 18:55:29 +02002199 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
2200 data_size -= sg->length;
2201 if (data_size <= 0) {
2202 sg->length += data_size;
2203 i++;
2204 break;
Adrian Hunter6a79e392008-12-31 18:21:17 +01002205 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002206 }
Per Forlin54d49d72011-07-01 18:55:29 +02002207 brq->data.sg_len = i;
2208 }
Adrian Hunter6a79e392008-12-31 18:21:17 +01002209
Per Forlinee8a43a2011-07-01 18:55:33 +02002210 mqrq->mmc_active.mrq = &brq->mrq;
2211 mqrq->mmc_active.err_check = mmc_blk_err_check;
2212
Per Forlin54d49d72011-07-01 18:55:29 +02002213 mmc_queue_bounce_pre(mqrq);
2214}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
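/*
 * Number of scatterlist segments needed for the packed command header
 * (512 bytes, or 4KB for large-sector cards) given the queue's maximum
 * segment size.
 */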
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002216static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
2217 struct mmc_card *card)
2218{
2219 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
2220 unsigned int max_seg_sz = queue_max_segment_size(q);
2221 unsigned int len, nr_segs = 0;
2222
2223 do {
2224 len = min(hdr_sz, max_seg_sz);
2225 hdr_sz -= len;
2226 nr_segs++;
2227 } while (hdr_sz);
2228
2229 return nr_segs;
2230}
2231
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002232/**
2233 * mmc_blk_disable_wr_packing() - disables packing mode
2234 * @mq: MMC queue.
2235 *
2236 */
2237void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
2238{
2239 if (mq) {
2240 mq->wr_packing_enabled = false;
2241 mq->num_of_potential_packed_wr_reqs = 0;
2242 }
2243}
2244EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
2245
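/*
 * Adaptive threshold for enabling write packing. A running mean of the
 * "potential packed writes" seen so far is kept in static variables;
 * when the current potential exceeds the mean, the trigger is lowered
 * (more packing), otherwise it is raised, within fixed lower and upper
 * bounds. Urgent reads push the trigger up to reduce packing.
 */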
Lee Susman841fd132013-04-23 17:59:26 +03002246static int get_packed_trigger(int potential, struct mmc_card *card,
2247 struct request *req, int curr_trigger)
2248{
2249 static int num_mean_elements = 1;
2250 static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2251 unsigned int trigger = curr_trigger;
2252 unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
2253
2254 /* scale down the upper bound to 75% */
2255 pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
2256
2257 /*
2258 * since the most common calls for this function are with small
2259 * potential write values and since we don't want these calls to affect
2260 * the packed trigger, set a lower bound and ignore calls with
2261 * potential lower than that bound
2262 */
2263 if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
2264 return trigger;
2265
2266 /*
2267 * this is to prevent integer overflow in the following calculation:
2268	 * once num_mean_elements exceeds PACKED_TRIGGER_MAX_ELEMENTS, reset the algorithm
2269 */
2270 if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
2271 num_mean_elements = 1;
2272 mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
2273 }
2274
2275 /*
2276 * get next mean value based on previous mean value and current
2277 * potential packed writes. Calculation is as follows:
2278 * mean_pot[i+1] =
2279 * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
2280 */
2281 mean_potential *= num_mean_elements;
2282 /*
2283 * add num_mean_elements so that the division of two integers doesn't
2284 * lower mean_potential too much
2285 */
2286 if (potential > mean_potential)
2287 mean_potential += num_mean_elements;
2288 mean_potential += potential;
2289 /* this is for gaining more precision when dividing two integers */
2290 mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
2291 /* this completes the mean calculation */
2292 mean_potential /= ++num_mean_elements;
2293 mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
2294
2295 /*
2296	 * if the current potential packed writes value is greater than the mean potential
2297 * then the heuristic is that the following workload will contain many
2298 * write requests, therefore we lower the packed trigger. In the
2299 * opposite case we want to increase the trigger in order to get less
2300 * packing events.
2301 */
2302 if (potential >= mean_potential)
2303 trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
2304 PCKD_TRGR_LOWER_BOUND : trigger - 1;
2305 else
2306 trigger = (trigger >= pckd_trgr_upper_bound) ?
2307 pckd_trgr_upper_bound : trigger + 1;
2308
2309 /*
2310 * an urgent read request indicates a packed list being interrupted
2311 * by this read, therefore we aim for less packing, hence the trigger
2312 * gets increased
2313 */
2314 if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
2315 trigger += PCKD_TRGR_URGENT_PENALTY;
2316
2317 return trigger;
2318}
2319
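/*
 * Decide whether write packing should be enabled for this queue. A read
 * disables packing, while a run of consecutive writes longer than the
 * current trigger enables it; the trigger itself is updated via
 * get_packed_trigger().
 */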
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002320static void mmc_blk_write_packing_control(struct mmc_queue *mq,
2321 struct request *req)
2322{
2323 struct mmc_host *host = mq->card->host;
2324 int data_dir;
2325
2326 if (!(host->caps2 & MMC_CAP2_PACKED_WR))
2327 return;
2328
Maya Erez8e2b3c32012-12-02 13:27:15 +02002329 /* Support for the write packing on eMMC 4.5 or later */
2330 if (mq->card->ext_csd.rev <= 5)
2331 return;
2332
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002333 /*
2334 * In case the packing control is not supported by the host, it should
2335 * not have an effect on the write packing. Therefore we have to enable
2336 * the write packing
2337 */
2338 if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
2339 mq->wr_packing_enabled = true;
2340 return;
2341 }
2342
2343 if (!req || (req && (req->cmd_flags & REQ_PREFLUSH))) {
2344 if (mq->num_of_potential_packed_wr_reqs >
2345 mq->num_wr_reqs_to_start_packing)
2346 mq->wr_packing_enabled = true;
Lee Susman841fd132013-04-23 17:59:26 +03002347 mq->num_wr_reqs_to_start_packing =
2348 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2349 mq->card, req,
2350 mq->num_wr_reqs_to_start_packing);
Tatyana Brokhman843915a2012-10-07 10:26:27 +02002351 mq->num_of_potential_packed_wr_reqs = 0;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002352 return;
2353 }
2354
2355 data_dir = rq_data_dir(req);
2356
2357 if (data_dir == READ) {
Konstantin Dorfman225c9c72013-02-05 15:45:53 +02002358 mmc_blk_disable_wr_packing(mq);
Lee Susman841fd132013-04-23 17:59:26 +03002359 mq->num_wr_reqs_to_start_packing =
2360 get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
2361 mq->card, req,
2362 mq->num_wr_reqs_to_start_packing);
2363 mq->num_of_potential_packed_wr_reqs = 0;
2364 mq->wr_packing_enabled = false;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002365 return;
2366 } else if (data_dir == WRITE) {
2367 mq->num_of_potential_packed_wr_reqs++;
2368 }
2369
2370 if (mq->num_of_potential_packed_wr_reqs >
2371 mq->num_wr_reqs_to_start_packing)
2372 mq->wr_packing_enabled = true;
2373}
2374
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002375struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
2376{
2377 if (!card)
2378 return NULL;
2379
2380 return &card->wr_pack_stats;
2381}
2382EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
2383
2384void mmc_blk_init_packed_statistics(struct mmc_card *card)
2385{
2386 int max_num_of_packed_reqs = 0;
2387
2388 if (!card || !card->wr_pack_stats.packing_events)
2389 return;
2390
2391 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
2392
2393 spin_lock(&card->wr_pack_stats.lock);
2394 memset(card->wr_pack_stats.packing_events, 0,
2395 (max_num_of_packed_reqs + 1) *
2396 sizeof(*card->wr_pack_stats.packing_events));
2397 memset(&card->wr_pack_stats.pack_stop_reason, 0,
2398 sizeof(card->wr_pack_stats.pack_stop_reason));
2399 card->wr_pack_stats.enabled = true;
2400 spin_unlock(&card->wr_pack_stats.lock);
2401}
2402EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
2403
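/*
 * Try to collect further write requests from the block queue into a
 * packed group behind @req. Requests are fetched until a limit is hit
 * (max packed entries, sectors, segments, FUA, direction change, ...);
 * each stop reason is recorded in the packing statistics. Returns the
 * number of packed entries, or 0 if the request is not packed.
 */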
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002404static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
2405{
2406 struct request_queue *q = mq->queue;
2407 struct mmc_card *card = mq->card;
2408 struct request *cur = req, *next = NULL;
2409 struct mmc_blk_data *md = mq->data;
2410 struct mmc_queue_req *mqrq = mq->mqrq_cur;
2411 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
2412 unsigned int req_sectors = 0, phys_segments = 0;
2413 unsigned int max_blk_count, max_phys_segs;
2414 bool put_back = true;
2415 u8 max_packed_rw = 0;
2416 u8 reqs = 0;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002417 struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002418
Shawn Lin96e52da2016-08-26 08:49:55 +08002419 /*
2420	 * We don't need to check packed for any further
2421	 * packed operations, as we set MMC_PACKED_NONE
2422	 * and return zero for reqs if we get a null packed. Also
2423	 * we clear the MMC_BLK_PACKED_CMD flag to avoid doing
2424	 * it again when removing the blk req.
2425 */
2426 if (!mqrq->packed) {
2427 md->flags &= (~MMC_BLK_PACKED_CMD);
2428 goto no_packed;
2429 }
2430
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002431 if (!(md->flags & MMC_BLK_PACKED_CMD))
2432 goto no_packed;
2433
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002434 if (!mq->wr_packing_enabled)
2435 goto no_packed;
2436
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002437 if ((rq_data_dir(cur) == WRITE) &&
2438 mmc_host_packed_wr(card->host))
2439 max_packed_rw = card->ext_csd.max_packed_writes;
2440
2441 if (max_packed_rw == 0)
2442 goto no_packed;
2443
2444 if (mmc_req_rel_wr(cur) &&
2445 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
2446 goto no_packed;
2447
2448 if (mmc_large_sector(card) &&
2449 !IS_ALIGNED(blk_rq_sectors(cur), 8))
2450 goto no_packed;
2451
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002452 if (cur->cmd_flags & REQ_FUA)
2453 goto no_packed;
2454
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002455 mmc_blk_clear_packed(mqrq);
2456
2457 max_blk_count = min(card->host->max_blk_count,
2458 card->host->max_req_size >> 9);
2459 if (unlikely(max_blk_count > 0xffff))
2460 max_blk_count = 0xffff;
2461
2462 max_phys_segs = queue_max_segments(q);
2463 req_sectors += blk_rq_sectors(cur);
2464 phys_segments += cur->nr_phys_segments;
2465
2466 if (rq_data_dir(cur) == WRITE) {
2467 req_sectors += mmc_large_sector(card) ? 8 : 1;
2468 phys_segments += mmc_calc_packed_hdr_segs(q, card);
2469 }
2470
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002471 spin_lock(&stats->lock);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002472 do {
2473 if (reqs >= max_packed_rw - 1) {
2474 put_back = false;
2475 break;
2476 }
2477
2478 spin_lock_irq(q->queue_lock);
2479 next = blk_fetch_request(q);
2480 spin_unlock_irq(q->queue_lock);
2481 if (!next) {
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002482 MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002483 put_back = false;
2484 break;
2485 }
2486
2487 if (mmc_large_sector(card) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002488 !IS_ALIGNED(blk_rq_sectors(next), 8)) {
2489 MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002490 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002491 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002492
Mike Christie3a5e02c2016-06-05 14:32:23 -05002493 if (req_op(next) == REQ_OP_DISCARD ||
Adrian Hunter7afafc82016-08-16 10:59:35 +03002494 req_op(next) == REQ_OP_SECURE_ERASE ||
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002495 req_op(next) == REQ_OP_FLUSH) {
2496 if (req_op(next) != REQ_OP_SECURE_ERASE)
2497 MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002498 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002499 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002500
Konstantin Dorfman31a482d2013-02-05 16:26:19 +02002501 if (next->cmd_flags & REQ_FUA) {
2502 MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
2503 break;
2504 }
2505
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002506 if (rq_data_dir(cur) != rq_data_dir(next)) {
2507 MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002508 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002509 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002510
2511 if (mmc_req_rel_wr(next) &&
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002512 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
2513 MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002514 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002515 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002516
2517 req_sectors += blk_rq_sectors(next);
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002518 if (req_sectors > max_blk_count) {
2519 if (stats->enabled)
2520 stats->pack_stop_reason[EXCEEDS_SECTORS]++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002521 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002522 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002523
2524 phys_segments += next->nr_phys_segments;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002525 if (phys_segments > max_phys_segs) {
2526 MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002527 break;
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002528 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002529
Maya Erez5a8dae12014-12-04 15:13:59 +02002530 if (mq->no_pack_for_random) {
2531 if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
2532 blk_rq_pos(next)) {
2533 MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
2534				put_back = true;
2535 break;
2536 }
2537 }
2538
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002539 if (rq_data_dir(next) == WRITE)
2540 mq->num_of_potential_packed_wr_reqs++;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002541 list_add_tail(&next->queuelist, &mqrq->packed->list);
2542 cur = next;
2543 reqs++;
2544 } while (1);
2545
2546 if (put_back) {
2547 spin_lock_irq(q->queue_lock);
2548 blk_requeue_request(q, next);
2549 spin_unlock_irq(q->queue_lock);
2550 }
2551
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002552 if (stats->enabled) {
2553 if (reqs + 1 <= card->ext_csd.max_packed_writes)
2554 stats->packing_events[reqs + 1]++;
2555 if (reqs + 1 == max_packed_rw)
2556 MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
2557 }
2558
2559 spin_unlock(&stats->lock);
2560
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002561 if (reqs > 0) {
2562 list_add(&req->queuelist, &mqrq->packed->list);
2563 mqrq->packed->nr_entries = ++reqs;
2564 mqrq->packed->retries = reqs;
2565 return reqs;
2566 }
2567
2568no_packed:
2569 mqrq->cmd_type = MMC_PACKED_NONE;
2570 return 0;
2571}
2572
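/*
 * Build a packed WRITE request: a header block (8 sectors for
 * large-sector cards, otherwise 1) holding the CMD23/CMD25 arguments of
 * every entry in the packed list, followed by the data of all entries
 * as a single multi-block write guarded by CMD23.
 */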
2573static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
2574 struct mmc_card *card,
2575 struct mmc_queue *mq)
2576{
2577 struct mmc_blk_request *brq = &mqrq->brq;
2578 struct request *req = mqrq->req;
2579 struct request *prq;
2580 struct mmc_blk_data *md = mq->data;
2581 struct mmc_packed *packed = mqrq->packed;
2582 bool do_rel_wr, do_data_tag;
Jiri Slaby3f2d2662016-10-03 10:58:28 +02002583 __le32 *packed_cmd_hdr;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002584 u8 hdr_blocks;
2585 u8 i = 1;
2586
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002587 mqrq->cmd_type = MMC_PACKED_WRITE;
2588 packed->blocks = 0;
2589 packed->idx_failure = MMC_PACKED_NR_IDX;
2590
2591 packed_cmd_hdr = packed->cmd_hdr;
2592 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002593 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
2594 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002595 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
2596
2597 /*
2598 * Argument for each entry of packed group
2599 */
2600 list_for_each_entry(prq, &packed->list, queuelist) {
2601 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
2602 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2603 (prq->cmd_flags & REQ_META) &&
2604 (rq_data_dir(prq) == WRITE) &&
Adrian Hunterd806b462016-06-10 16:22:16 +03002605 blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002606 /* Argument of CMD23 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002607 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002608 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
2609 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002610 blk_rq_sectors(prq));
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002611 /* Argument of CMD18 or CMD25 */
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002612 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002613 mmc_card_blockaddr(card) ?
Taras Kondratiukf68381a2016-07-13 22:05:38 +00002614 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002615 packed->blocks += blk_rq_sectors(prq);
2616 i++;
2617 }
2618
2619 memset(brq, 0, sizeof(struct mmc_blk_request));
2620 brq->mrq.cmd = &brq->cmd;
2621 brq->mrq.data = &brq->data;
2622 brq->mrq.sbc = &brq->sbc;
2623 brq->mrq.stop = &brq->stop;
2624
2625 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2626 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2627 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2628
2629 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2630 brq->cmd.arg = blk_rq_pos(req);
2631 if (!mmc_card_blockaddr(card))
2632 brq->cmd.arg <<= 9;
2633 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2634
2635 brq->data.blksz = 512;
2636 brq->data.blocks = packed->blocks + hdr_blocks;
Jaehoon Chungf53f1102016-02-01 21:07:36 +09002637 brq->data.flags = MMC_DATA_WRITE;
Asutosh Dasf0665412012-07-27 18:10:19 +05302638 brq->data.fault_injected = false;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002639
2640 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2641 brq->stop.arg = 0;
2642 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2643
2644 mmc_set_data_timeout(&brq->data, card);
2645
2646 brq->data.sg = mqrq->sg;
2647 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2648
2649 mqrq->mmc_active.mrq = &brq->mrq;
Tatyana Brokhman71aefb82012-10-09 13:50:56 +02002650
2651 /*
2652	 * This is intended for packed command test usage - in case these
2653	 * functions are not in use, the respective pointers are NULL.
2654 */
2655 if (mq->err_check_fn)
2656 mqrq->mmc_active.err_check = mq->err_check_fn;
2657 else
2658 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2659
2660 if (mq->packed_test_fn)
2661 mq->packed_test_fn(mq->queue, mqrq);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002662
2663 mmc_queue_bounce_pre(mqrq);
2664}
2665
Adrian Hunter67716322011-08-29 16:42:15 +03002666static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2667 struct mmc_blk_request *brq, struct request *req,
2668 int ret)
2669{
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002670 struct mmc_queue_req *mq_rq;
2671 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2672
Adrian Hunter67716322011-08-29 16:42:15 +03002673 /*
2674 * If this is an SD card and we're writing, we can first
2675 * mark the known good sectors as ok.
2676 *
2677 * If the card is not SD, we can still ok written sectors
2678 * as reported by the controller (which might be less than
2679 * the real number of written sectors, but never more).
2680 */
2681 if (mmc_card_sd(card)) {
2682 u32 blocks;
Asutosh Dasf0665412012-07-27 18:10:19 +05302683 if (!brq->data.fault_injected) {
2684 blocks = mmc_sd_num_wr_blocks(card);
2685 if (blocks != (u32)-1)
2686 ret = blk_end_request(req, 0, blocks << 9);
2687 } else
2688 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002689 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002690 if (!mmc_packed_cmd(mq_rq->cmd_type))
2691 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
Adrian Hunter67716322011-08-29 16:42:15 +03002692 }
2693 return ret;
2694}
2695
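/*
 * Complete the entries of a packed request. Entries before the failure
 * index are finished successfully; from the failure index onwards the
 * request is set up to be retried and 1 is returned. If no entry
 * failed, every entry is completed and 0 is returned.
 */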
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002696static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2697{
2698 struct request *prq;
2699 struct mmc_packed *packed = mq_rq->packed;
2700 int idx = packed->idx_failure, i = 0;
2701 int ret = 0;
2702
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002703 while (!list_empty(&packed->list)) {
2704 prq = list_entry_rq(packed->list.next);
2705 if (idx == i) {
2706 /* retry from error index */
2707 packed->nr_entries -= idx;
2708 mq_rq->req = prq;
2709 ret = 1;
2710
2711 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2712 list_del_init(&prq->queuelist);
2713 mmc_blk_clear_packed(mq_rq);
2714 }
2715 return ret;
2716 }
2717 list_del_init(&prq->queuelist);
2718 blk_end_request(prq, 0, blk_rq_bytes(prq));
2719 i++;
2720 }
2721
2722 mmc_blk_clear_packed(mq_rq);
2723 return ret;
2724}
2725
2726static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2727{
2728 struct request *prq;
2729 struct mmc_packed *packed = mq_rq->packed;
2730
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002731 while (!list_empty(&packed->list)) {
2732 prq = list_entry_rq(packed->list.next);
2733 list_del_init(&prq->queuelist);
2734 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2735 }
2736
2737 mmc_blk_clear_packed(mq_rq);
2738}
2739
2740static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2741 struct mmc_queue_req *mq_rq)
2742{
2743 struct request *prq;
2744 struct request_queue *q = mq->queue;
2745 struct mmc_packed *packed = mq_rq->packed;
2746
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002747 while (!list_empty(&packed->list)) {
2748 prq = list_entry_rq(packed->list.prev);
2749 if (prq->queuelist.prev != &packed->list) {
2750 list_del_init(&prq->queuelist);
2751 spin_lock_irq(q->queue_lock);
2752 blk_requeue_request(mq->queue, prq);
2753 spin_unlock_irq(q->queue_lock);
2754 } else {
2755 list_del_init(&prq->queuelist);
2756 }
2757 }
2758
2759 mmc_blk_clear_packed(mq_rq);
2760}
2761
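/*
 * Main read/write issue path. Keeps two requests in flight using the
 * asynchronous request mechanism: prepare the new request (packed or
 * normal), hand it to the host, then handle the completion status of
 * the request that just finished - retrying, falling back to
 * single-block reads, resetting the controller or aborting as needed.
 */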
Per Forlinee8a43a2011-07-01 18:55:33 +02002762static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
Per Forlin54d49d72011-07-01 18:55:29 +02002763{
2764 struct mmc_blk_data *md = mq->data;
2765 struct mmc_card *card = md->queue.card;
2766 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
Adrian Hunterb8360a42015-05-07 13:10:24 +03002767 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
Per Forlind78d4a82011-07-01 18:55:30 +02002768 enum mmc_blk_status status;
Per Forlinee8a43a2011-07-01 18:55:33 +02002769 struct mmc_queue_req *mq_rq;
Saugata Dasa5075eb2012-05-17 16:32:21 +05302770 struct request *req = rqc;
Per Forlinee8a43a2011-07-01 18:55:33 +02002771 struct mmc_async_req *areq;
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002772 const u8 packed_nr = 2;
2773 u8 reqs = 0;
Mark Salyzyn6904e432016-01-28 11:12:25 -08002774#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2775 unsigned long waitfor = jiffies;
2776#endif
Per Forlinee8a43a2011-07-01 18:55:33 +02002777
2778 if (!rqc && !mq->mqrq_prev->req)
2779 return 0;
Per Forlin54d49d72011-07-01 18:55:29 +02002780
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002781 if (rqc)
2782 reqs = mmc_blk_prep_packed_list(mq, rqc);
2783
Per Forlin54d49d72011-07-01 18:55:29 +02002784 do {
Per Forlinee8a43a2011-07-01 18:55:33 +02002785 if (rqc) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05302786 /*
2787			 * When 4KB native sectors are enabled, only reads and
2788			 * writes that are a multiple of 8 blocks are allowed
2789 */
Yuan, Juntaoe87c8562016-05-13 07:59:24 +00002790 if (mmc_large_sector(card) &&
2791 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
Saugata Dasa5075eb2012-05-17 16:32:21 +05302792 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2793 req->rq_disk->disk_name);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002794 mq_rq = mq->mqrq_cur;
Saugata Dasa5075eb2012-05-17 16:32:21 +05302795 goto cmd_abort;
2796 }
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002797
2798 if (reqs >= packed_nr)
2799 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2800 card, mq);
2801 else
2802 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
Per Forlinee8a43a2011-07-01 18:55:33 +02002803 areq = &mq->mqrq_cur->mmc_active;
2804 } else
2805 areq = NULL;
2806 areq = mmc_start_req(card->host, areq, (int *) &status);
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002807 if (!areq) {
2808 if (status == MMC_BLK_NEW_REQUEST)
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02002809 set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Per Forlinee8a43a2011-07-01 18:55:33 +02002810 return 0;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002811 }
Pierre Ossman98ccf142007-05-12 00:26:16 +02002812
Per Forlinee8a43a2011-07-01 18:55:33 +02002813 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2814 brq = &mq_rq->brq;
2815 req = mq_rq->req;
Adrian Hunter67716322011-08-29 16:42:15 +03002816 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
Per Forlinee8a43a2011-07-01 18:55:33 +02002817 mmc_queue_bounce_post(mq_rq);
Pierre Ossman98ccf142007-05-12 00:26:16 +02002818
Per Forlind78d4a82011-07-01 18:55:30 +02002819 switch (status) {
2820 case MMC_BLK_SUCCESS:
2821 case MMC_BLK_PARTIAL:
2822 /*
2823 * A block was successfully transferred.
2824 */
Adrian Hunter67716322011-08-29 16:42:15 +03002825 mmc_blk_reset_success(md, type);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002826
Mark Salyzyn6904e432016-01-28 11:12:25 -08002827 mmc_blk_simulate_delay(mq, rqc, waitfor);
2828
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002829 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2830 ret = mmc_blk_end_packed_req(mq_rq);
2831 break;
2832 } else {
2833 ret = blk_end_request(req, 0,
Per Forlind78d4a82011-07-01 18:55:30 +02002834 brq->data.bytes_xfered);
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002835 }
2836
Adrian Hunter67716322011-08-29 16:42:15 +03002837 /*
2838 * If the blk_end_request function returns non-zero even
2839 * though all data has been transferred and no errors
2840 * were returned by the host controller, it's a bug.
2841 */
Per Forlinee8a43a2011-07-01 18:55:33 +02002842 if (status == MMC_BLK_SUCCESS && ret) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05302843 pr_err("%s BUG rq_tot %d d_xfer %d\n",
Per Forlinee8a43a2011-07-01 18:55:33 +02002844 __func__, blk_rq_bytes(req),
2845 brq->data.bytes_xfered);
2846 rqc = NULL;
2847 goto cmd_abort;
2848 }
Per Forlind78d4a82011-07-01 18:55:30 +02002849 break;
2850 case MMC_BLK_CMD_ERR:
Adrian Hunter67716322011-08-29 16:42:15 +03002851 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
Ding Wang29535f72015-05-18 20:14:15 +08002852 if (mmc_blk_reset(md, card->host, type))
2853 goto cmd_abort;
2854 if (!ret)
2855 goto start_new_req;
2856 break;
Per Forlind78d4a82011-07-01 18:55:30 +02002857 case MMC_BLK_RETRY:
Adrian Hunterb8360a42015-05-07 13:10:24 +03002858 retune_retry_done = brq->retune_retry_done;
Per Forlind78d4a82011-07-01 18:55:30 +02002859 if (retry++ < 5)
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01002860 break;
Adrian Hunter67716322011-08-29 16:42:15 +03002861 /* Fall through */
Per Forlind78d4a82011-07-01 18:55:30 +02002862 case MMC_BLK_ABORT:
Adrian Hunter67716322011-08-29 16:42:15 +03002863 if (!mmc_blk_reset(md, card->host, type))
2864 break;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002865 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03002866 case MMC_BLK_DATA_ERR: {
2867 int err;
2868
2869 err = mmc_blk_reset(md, card->host, type);
2870 if (!err)
2871 break;
Sahitya Tummalad0a19842014-10-31 09:46:20 +05302872 goto cmd_abort;
Adrian Hunter67716322011-08-29 16:42:15 +03002873 }
2874 case MMC_BLK_ECC_ERR:
2875 if (brq->data.blocks > 1) {
2876 /* Redo read one sector at a time */
Joe Perches66061102014-09-12 14:56:56 -07002877 pr_warn("%s: retrying using single block read\n",
2878 req->rq_disk->disk_name);
Adrian Hunter67716322011-08-29 16:42:15 +03002879 disable_multi = 1;
2880 break;
2881 }
Per Forlind78d4a82011-07-01 18:55:30 +02002882 /*
2883 * After an error, we redo I/O one sector at a
2884 * time, so we only reach here after trying to
2885 * read a single sector.
2886 */
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302887 ret = blk_end_request(req, -EIO,
Per Forlind78d4a82011-07-01 18:55:30 +02002888 brq->data.blksz);
Per Forlinee8a43a2011-07-01 18:55:33 +02002889 if (!ret)
2890 goto start_new_req;
Per Forlind78d4a82011-07-01 18:55:30 +02002891 break;
Sujit Reddy Thummaa8ad82cc2011-12-08 14:05:50 +05302892 case MMC_BLK_NOMEDIUM:
2893 goto cmd_abort;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002894 default:
2895 pr_err("%s: Unhandled return value (%d)",
2896 req->rq_disk->disk_name, status);
2897 goto cmd_abort;
Russell King - ARM Linux4c2b8f22011-06-20 20:10:49 +01002898 }
2899
Per Forlinee8a43a2011-07-01 18:55:33 +02002900 if (ret) {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002901 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2902 if (!mq_rq->packed->retries)
2903 goto cmd_abort;
2904 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2905 mmc_start_req(card->host,
2906 &mq_rq->mmc_active, NULL);
2907 } else {
2908
2909 /*
2910				 * In case of an incomplete request
2911 * prepare it again and resend.
2912 */
2913 mmc_blk_rw_rq_prep(mq_rq, card,
2914 disable_multi, mq);
2915 mmc_start_req(card->host,
2916 &mq_rq->mmc_active, NULL);
2917 }
Adrian Hunterb8360a42015-05-07 13:10:24 +03002918 mq_rq->brq.retune_retry_done = retune_retry_done;
Per Forlinee8a43a2011-07-01 18:55:33 +02002919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 } while (ret);
2921
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 return 1;
2923
Russell King - ARM Linuxa01f3ccf2011-06-20 20:10:28 +01002924 cmd_abort:
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002925 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2926 mmc_blk_abort_packed_req(mq_rq);
2927 } else {
2928 if (mmc_card_removed(card))
2929 req->cmd_flags |= REQ_QUIET;
2930 while (ret)
2931 ret = blk_end_request(req, -EIO,
2932 blk_rq_cur_bytes(req));
2933 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
Per Forlinee8a43a2011-07-01 18:55:33 +02002935 start_new_req:
2936 if (rqc) {
Seungwon Jeon7a819022013-01-22 19:48:07 +09002937 if (mmc_card_removed(card)) {
2938 rqc->cmd_flags |= REQ_QUIET;
2939 blk_end_request_all(rqc, -EIO);
2940 } else {
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09002941 /*
2942			 * If the current request is packed, it needs to be put back.
2943 */
2944 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2945 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2946
Seungwon Jeon7a819022013-01-22 19:48:07 +09002947 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2948 mmc_start_req(card->host,
2949 &mq->mqrq_cur->mmc_active, NULL);
2950 }
Per Forlinee8a43a2011-07-01 18:55:33 +02002951 }
2952
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 return 0;
2954}
2955
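/*
 * Entry point from the queue thread for every request. Claims the host
 * for the first request in a burst, makes sure the right partition is
 * selected, updates the write packing control and then dispatches to
 * the discard, secure erase, flush or read/write handlers. The host is
 * released again once the queue has drained or a special request is
 * done.
 */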
Linus Walleij29eb7bd2016-09-20 11:34:38 +02002956int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
Adrian Hunterbd788c92010-08-11 14:17:47 -07002957{
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002958 int ret;
2959 struct mmc_blk_data *md = mq->data;
2960 struct mmc_card *card = md->queue.card;
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002961 struct mmc_host *host = card->host;
2962 unsigned long flags;
Adrian Hunter869c5542016-08-25 14:11:43 -06002963 bool req_is_special = mmc_req_is_special(req);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002964
Per Forlinee8a43a2011-07-01 18:55:33 +02002965 if (req && !mq->mqrq_prev->req)
2966 /* claim host only for the first request */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002967 mmc_get_card(card);
Per Forlinee8a43a2011-07-01 18:55:33 +02002968
Andrei Warkentin371a6892011-04-11 18:10:25 -05002969 ret = mmc_blk_part_switch(card, md);
2970 if (ret) {
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03002971 if (req) {
Subhash Jadavaniecf8b5d2012-06-07 15:46:58 +05302972 blk_end_request_all(req, -EIO);
Adrian Hunter0d7d85c2011-09-23 12:48:20 +03002973 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05002974 ret = 0;
2975 goto out;
2976 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002977
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02002978 mmc_blk_write_packing_control(mq, req);
2979
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02002980 clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
Mike Christiec2df40d2016-06-05 14:32:17 -05002981 if (req && req_op(req) == REQ_OP_DISCARD) {
Per Forlinee8a43a2011-07-01 18:55:33 +02002982 /* complete ongoing async transfer before issuing discard */
2983 if (card->host->areq)
2984 mmc_blk_issue_rw_rq(mq, NULL);
Christoph Hellwig288dab82016-06-09 16:00:36 +02002985 ret = mmc_blk_issue_discard_rq(mq, req);
2986 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
 2987 		/* complete ongoing async transfer before issuing secure erase */
2988 if (card->host->areq)
2989 mmc_blk_issue_rw_rq(mq, NULL);
2990 ret = mmc_blk_issue_secdiscard_rq(mq, req);
Mike Christie3a5e02c2016-06-05 14:32:23 -05002991 } else if (req && req_op(req) == REQ_OP_FLUSH) {
Jaehoon Chung393f9a02011-07-13 17:02:16 +09002992 /* complete ongoing async transfer before issuing flush */
2993 if (card->host->areq)
2994 mmc_blk_issue_rw_rq(mq, NULL);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05002995 ret = mmc_blk_issue_flush(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07002996 } else {
Konstantin Dorfman2220eed2013-01-14 14:28:17 -05002997 if (!req && host->areq) {
2998 spin_lock_irqsave(&host->context_info.lock, flags);
2999 host->context_info.is_waiting_last_req = true;
3000 spin_unlock_irqrestore(&host->context_info.lock, flags);
3001 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003002 ret = mmc_blk_issue_rw_rq(mq, req);
Adrian Hunter49804542010-08-11 14:17:50 -07003003 }
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003004
Andrei Warkentin371a6892011-04-11 18:10:25 -05003005out:
Sujit Reddy Thumma55291992014-12-09 20:40:16 +02003006 if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
3007 req_is_special)
Seungwon Jeonef3a69c72013-03-14 15:17:13 +09003008 /*
3009 * Release host when there are no more requests
 3010 		 * and after a special request (discard, flush) is done.
 3011 		 * In the case of a special request, there is no reentry to
 3012 		 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
3013 */
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003014 mmc_put_card(card);
Andrei Warkentin1a258db2011-04-11 18:10:24 -05003015 return ret;
Adrian Hunterbd788c92010-08-11 14:17:47 -07003016}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017
Russell Kinga6f6c962006-01-03 22:38:44 +00003018static inline int mmc_blk_readonly(struct mmc_card *card)
3019{
3020 return mmc_card_readonly(card) ||
3021 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
3022}
3023
Andrei Warkentin371a6892011-04-11 18:10:25 -05003024static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3025 struct device *parent,
3026 sector_t size,
3027 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003028 const char *subname,
3029 int area_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030{
3031 struct mmc_blk_data *md;
3032 int devidx, ret;
3033
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003034again:
3035 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
3036 return ERR_PTR(-ENOMEM);
3037
3038 spin_lock(&mmc_blk_lock);
3039 ret = ida_get_new(&mmc_blk_ida, &devidx);
3040 spin_unlock(&mmc_blk_lock);
3041
3042 if (ret == -EAGAIN)
3043 goto again;
3044 else if (ret)
3045 return ERR_PTR(ret);
3046
3047 if (devidx >= max_devices) {
3048 ret = -ENOSPC;
3049 goto out;
3050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07003052 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
Russell Kinga6f6c962006-01-03 22:38:44 +00003053 if (!md) {
3054 ret = -ENOMEM;
3055 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 }
Russell Kinga6f6c962006-01-03 22:38:44 +00003057
Johan Rudholmadd710e2011-12-02 08:51:06 +01003058 md->area_type = area_type;
3059
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003060 /*
Russell Kinga6f6c962006-01-03 22:38:44 +00003061 * Set the read-only status based on the supported commands
3062 * and the write protect switch.
3063 */
3064 md->read_only = mmc_blk_readonly(card);
3065
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003066 md->disk = alloc_disk(perdev_minors);
Russell Kinga6f6c962006-01-03 22:38:44 +00003067 if (md->disk == NULL) {
3068 ret = -ENOMEM;
3069 goto err_kfree;
3070 }
3071
3072 spin_lock_init(&md->lock);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003073 INIT_LIST_HEAD(&md->part);
Russell Kinga6f6c962006-01-03 22:38:44 +00003074 md->usage = 1;
3075
Adrian Hunterd09408a2011-06-23 13:40:28 +03003076 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
Russell Kinga6f6c962006-01-03 22:38:44 +00003077 if (ret)
3078 goto err_putdisk;
3079
Russell Kinga6f6c962006-01-03 22:38:44 +00003080 md->queue.data = md;
3081
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003082 md->disk->major = MMC_BLOCK_MAJOR;
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003083 md->disk->first_minor = devidx * perdev_minors;
Russell Kinga6f6c962006-01-03 22:38:44 +00003084 md->disk->fops = &mmc_bdops;
3085 md->disk->private_data = md;
3086 md->disk->queue = md->queue.queue;
Dan Williams307d8e62016-06-20 10:40:44 -07003087 md->parent = parent;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003088 set_disk_ro(md->disk, md->read_only || default_ro);
Colin Cross382c55f2015-10-22 10:00:41 -07003089 md->disk->flags = GENHD_FL_EXT_DEVT;
Ulf Hanssonf5b4d712014-09-03 11:02:23 +02003090 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
Loic Pallardy53d8f972012-08-06 17:12:28 +02003091 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
Russell Kinga6f6c962006-01-03 22:38:44 +00003092
3093 /*
3094 * As discussed on lkml, GENHD_FL_REMOVABLE should:
3095 *
3096 * - be set for removable media with permanent block devices
3097 * - be unset for removable block devices with permanent media
3098 *
3099 * Since MMC block devices clearly fall under the second
3100 * case, we do not set GENHD_FL_REMOVABLE. Userspace
3101 * should use the block device creation/destruction hotplug
3102 * messages to tell when the card is present.
3103 */
3104
Andrei Warkentinf06c9152011-04-21 22:46:13 -05003105 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
Ulf Hansson9aaf3432016-04-06 16:12:08 +02003106 "mmcblk%u%s", card->host->index, subname ? subname : "");
Russell Kinga6f6c962006-01-03 22:38:44 +00003107
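	/*
	 * Choose the logical block size exposed to the block layer: eMMC
	 * devices may advertise a native data sector size in EXT_CSD (per
	 * the eMMC spec this is either 512 or 4096 bytes); all other cards
	 * fall back to 512-byte sectors.
	 */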
Saugata Dasa5075eb2012-05-17 16:32:21 +05303108 if (mmc_card_mmc(card))
3109 blk_queue_logical_block_size(md->queue.queue,
3110 card->ext_csd.data_sector_size);
3111 else
3112 blk_queue_logical_block_size(md->queue.queue, 512);
3113
Andrei Warkentin371a6892011-04-11 18:10:25 -05003114 set_capacity(md->disk, size);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003115
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003116 if (mmc_host_cmd23(card->host)) {
Daniel Glöckner0ed50ab2016-08-30 14:17:30 +02003117 if ((mmc_card_mmc(card) &&
3118 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
Andrei Warkentinf0d89972011-05-23 15:06:38 -05003119 (mmc_card_sd(card) &&
3120 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
3121 md->flags |= MMC_BLK_CMD23;
3122 }
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003123
3124 if (mmc_card_mmc(card) &&
3125 md->flags & MMC_BLK_CMD23 &&
3126 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
3127 card->ext_csd.rel_sectors)) {
3128 md->flags |= MMC_BLK_REL_WR;
Jens Axboee9d5c742016-03-30 10:17:20 -06003129 blk_queue_write_cache(md->queue.queue, true, true);
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003130 }
3131
Seungwon Jeonce39f9d2013-02-06 17:02:46 +09003132 if (mmc_card_mmc(card) &&
3133 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
3134 (md->flags & MMC_BLK_CMD23) &&
3135 card->ext_csd.packed_event_en) {
3136 if (!mmc_packed_init(&md->queue, card))
3137 md->flags |= MMC_BLK_PACKED_CMD;
3138 }
3139
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 return md;
Russell Kinga6f6c962006-01-03 22:38:44 +00003141
3142 err_putdisk:
3143 put_disk(md->disk);
3144 err_kfree:
3145 kfree(md);
3146 out:
Ulf Hanssonb10fa992016-04-07 14:36:46 +02003147 spin_lock(&mmc_blk_lock);
3148 ida_remove(&mmc_blk_ida, devidx);
3149 spin_unlock(&mmc_blk_lock);
Russell Kinga6f6c962006-01-03 22:38:44 +00003150 return ERR_PTR(ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151}
3152
Andrei Warkentin371a6892011-04-11 18:10:25 -05003153static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
3154{
3155 sector_t size;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003156
3157 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
3158 /*
 3159 		 * The EXT_CSD sector count is in number of 512 byte
3160 * sectors.
3161 */
3162 size = card->ext_csd.sectors;
3163 } else {
3164 /*
3165 * The CSD capacity field is in units of read_blkbits.
3166 * set_capacity takes units of 512 bytes.
3167 */
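		/*
		 * Illustrative example: with read_blkbits == 10 (1 KiB read
		 * blocks) and csd.capacity == 0x1000, this computes
		 * 0x1000 << (10 - 9) == 8192 sectors of 512 bytes, i.e. 4 MiB.
		 */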
Kuninori Morimoto087de9e2015-05-11 07:35:28 +00003168 size = (typeof(sector_t))card->csd.capacity
3169 << (card->csd.read_blkbits - 9);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003170 }
3171
Tobias Klauser7a30f2a2015-01-21 15:56:44 +01003172 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003173 MMC_BLK_DATA_AREA_MAIN);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003174}
3175
3176static int mmc_blk_alloc_part(struct mmc_card *card,
3177 struct mmc_blk_data *md,
3178 unsigned int part_type,
3179 sector_t size,
3180 bool default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003181 const char *subname,
3182 int area_type)
Andrei Warkentin371a6892011-04-11 18:10:25 -05003183{
3184 char cap_str[10];
3185 struct mmc_blk_data *part_md;
3186
3187 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003188 subname, area_type);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003189 if (IS_ERR(part_md))
3190 return PTR_ERR(part_md);
3191 part_md->part_type = part_type;
3192 list_add(&part_md->part, &md->part);
3193
James Bottomleyb9f28d82015-03-05 18:47:01 -08003194 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
Andrei Warkentin371a6892011-04-11 18:10:25 -05003195 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303196 pr_info("%s: %s %s partition %u %s\n",
Andrei Warkentin371a6892011-04-11 18:10:25 -05003197 part_md->disk->disk_name, mmc_card_id(card),
3198 mmc_card_name(card), part_md->part_type, cap_str);
3199 return 0;
3200}
3201
Namjae Jeone0c368d2011-10-06 23:41:38 +09003202/* MMC Physical partitions consist of two boot partitions and
3203 * up to four general purpose partitions.
 3204 * For each partition enabled in EXT_CSD a block device will be allocated
3205 * to provide access to the partition.
3206 */
3207
Andrei Warkentin371a6892011-04-11 18:10:25 -05003208static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3209{
Namjae Jeone0c368d2011-10-06 23:41:38 +09003210 int idx, ret = 0;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003211
3212 if (!mmc_card_mmc(card))
3213 return 0;
3214
Namjae Jeone0c368d2011-10-06 23:41:38 +09003215 for (idx = 0; idx < card->nr_parts; idx++) {
3216 if (card->part[idx].size) {
3217 ret = mmc_blk_alloc_part(card, md,
3218 card->part[idx].part_cfg,
3219 card->part[idx].size >> 9,
3220 card->part[idx].force_ro,
Johan Rudholmadd710e2011-12-02 08:51:06 +01003221 card->part[idx].name,
3222 card->part[idx].area_type);
Namjae Jeone0c368d2011-10-06 23:41:38 +09003223 if (ret)
3224 return ret;
3225 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003226 }
3227
3228 return ret;
3229}
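/*
 * Taken together with mmc_blk_alloc(), the result is one gendisk for the
 * user area plus one per enabled hardware partition.  With the "mmcblk%u%s"
 * naming above, an eMMC that exposes boot and RPMB areas typically shows up
 * as mmcblk0, mmcblk0boot0, mmcblk0boot1 and mmcblk0rpmb; the subname
 * strings come from card->part[].name, which is filled in by the core and
 * not visible in this file, so the exact names are an assumption.
 */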
3230
Andrei Warkentin371a6892011-04-11 18:10:25 -05003231static void mmc_blk_remove_req(struct mmc_blk_data *md)
3232{
Johan Rudholmadd710e2011-12-02 08:51:06 +01003233 struct mmc_card *card;
3234
Andrei Warkentin371a6892011-04-11 18:10:25 -05003235 if (md) {
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003236 /*
3237 * Flush remaining requests and free queues. It
3238 * is freeing the queue that stops new requests
3239 * from being accepted.
3240 */
Franck Jullien8efb83a2013-07-24 15:17:48 +02003241 card = md->queue.card;
Paul Taysomfdfa20c2013-06-04 14:42:40 -07003242 mmc_cleanup_queue(&md->queue);
3243 if (md->flags & MMC_BLK_PACKED_CMD)
3244 mmc_packed_clean(&md->queue);
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003245 device_remove_file(disk_to_dev(md->disk),
3246 &md->num_wr_reqs_to_start_packing);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003247 if (md->disk->flags & GENHD_FL_UP) {
3248 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003249 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
3250 card->ext_csd.boot_ro_lockable)
3251 device_remove_file(disk_to_dev(md->disk),
3252 &md->power_ro_lock);
Mark Salyzyn6904e432016-01-28 11:12:25 -08003253#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3254 device_remove_file(disk_to_dev(md->disk),
3255 &dev_attr_max_write_speed);
3256 device_remove_file(disk_to_dev(md->disk),
3257 &dev_attr_max_read_speed);
3258 device_remove_file(disk_to_dev(md->disk),
3259 &dev_attr_cache_size);
3260#endif
Andrei Warkentin371a6892011-04-11 18:10:25 -05003261
Andrei Warkentin371a6892011-04-11 18:10:25 -05003262 del_gendisk(md->disk);
3263 }
Andrei Warkentin371a6892011-04-11 18:10:25 -05003264 mmc_blk_put(md);
3265 }
3266}
3267
3268static void mmc_blk_remove_parts(struct mmc_card *card,
3269 struct mmc_blk_data *md)
3270{
3271 struct list_head *pos, *q;
3272 struct mmc_blk_data *part_md;
3273
3274 list_for_each_safe(pos, q, &md->part) {
3275 part_md = list_entry(pos, struct mmc_blk_data, part);
3276 list_del(pos);
3277 mmc_blk_remove_req(part_md);
3278 }
3279}
3280
3281static int mmc_add_disk(struct mmc_blk_data *md)
3282{
3283 int ret;
Johan Rudholmadd710e2011-12-02 08:51:06 +01003284 struct mmc_card *card = md->queue.card;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003285
Dan Williams307d8e62016-06-20 10:40:44 -07003286 device_add_disk(md->parent, md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003287 md->force_ro.show = force_ro_show;
3288 md->force_ro.store = force_ro_store;
Rabin Vincent641c3182011-04-23 20:52:58 +05303289 sysfs_attr_init(&md->force_ro.attr);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003290 md->force_ro.attr.name = "force_ro";
3291 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
3292 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
3293 if (ret)
Johan Rudholmadd710e2011-12-02 08:51:06 +01003294 goto force_ro_fail;
Mark Salyzyn6904e432016-01-28 11:12:25 -08003295#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3296 atomic_set(&md->queue.max_write_speed, max_write_speed);
3297 ret = device_create_file(disk_to_dev(md->disk),
3298 &dev_attr_max_write_speed);
3299 if (ret)
3300 goto max_write_speed_fail;
3301 atomic_set(&md->queue.max_read_speed, max_read_speed);
3302 ret = device_create_file(disk_to_dev(md->disk),
3303 &dev_attr_max_read_speed);
3304 if (ret)
3305 goto max_read_speed_fail;
3306 atomic_set(&md->queue.cache_size, cache_size);
3307 atomic_long_set(&md->queue.cache_used, 0);
3308 md->queue.cache_jiffies = jiffies;
3309 ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
3310 if (ret)
3311 goto cache_size_fail;
3312#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01003313
3314 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
3315 card->ext_csd.boot_ro_lockable) {
Al Viro88187392012-03-20 06:00:24 -04003316 umode_t mode;
Johan Rudholmadd710e2011-12-02 08:51:06 +01003317
3318 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
3319 mode = S_IRUGO;
3320 else
3321 mode = S_IRUGO | S_IWUSR;
3322
3323 md->power_ro_lock.show = power_ro_lock_show;
3324 md->power_ro_lock.store = power_ro_lock_store;
Rabin Vincent00d9ac02012-02-01 16:31:56 +01003325 sysfs_attr_init(&md->power_ro_lock.attr);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003326 md->power_ro_lock.attr.mode = mode;
3327 md->power_ro_lock.attr.name =
3328 "ro_lock_until_next_power_on";
3329 ret = device_create_file(disk_to_dev(md->disk),
3330 &md->power_ro_lock);
3331 if (ret)
3332 goto power_ro_lock_fail;
3333 }
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003334
3335 md->num_wr_reqs_to_start_packing.show =
3336 num_wr_reqs_to_start_packing_show;
3337 md->num_wr_reqs_to_start_packing.store =
3338 num_wr_reqs_to_start_packing_store;
3339 sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
3340 md->num_wr_reqs_to_start_packing.attr.name =
3341 "num_wr_reqs_to_start_packing";
3342 md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
3343 ret = device_create_file(disk_to_dev(md->disk),
3344 &md->num_wr_reqs_to_start_packing);
3345 if (ret)
Maya Erez17022402014-12-04 00:15:42 +02003346 goto num_wr_reqs_to_start_packing_fail;
Tatyana Brokhmanc879b062014-12-03 23:38:06 +02003347
Maya Erez5a8dae12014-12-04 15:13:59 +02003348 md->no_pack_for_random.show = no_pack_for_random_show;
3349 md->no_pack_for_random.store = no_pack_for_random_store;
3350 sysfs_attr_init(&md->no_pack_for_random.attr);
3351 md->no_pack_for_random.attr.name = "no_pack_for_random";
3352 md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
3353 ret = device_create_file(disk_to_dev(md->disk),
3354 &md->no_pack_for_random);
3355 if (ret)
3356 goto no_pack_for_random_fails;
3357
Johan Rudholmadd710e2011-12-02 08:51:06 +01003358 return ret;
3359
Maya Erez5a8dae12014-12-04 15:13:59 +02003360no_pack_for_random_fails:
3361 device_remove_file(disk_to_dev(md->disk),
3362 &md->num_wr_reqs_to_start_packing);
Maya Erez17022402014-12-04 00:15:42 +02003363num_wr_reqs_to_start_packing_fail:
3364 device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
Johan Rudholmadd710e2011-12-02 08:51:06 +01003365power_ro_lock_fail:
Mark Salyzyn6904e432016-01-28 11:12:25 -08003366#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
3367 device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
3368cache_size_fail:
3369 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
3370max_read_speed_fail:
3371 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
3372max_write_speed_fail:
3373#endif
Johan Rudholmadd710e2011-12-02 08:51:06 +01003374 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
3375force_ro_fail:
3376 del_gendisk(md->disk);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003377
3378 return ret;
3379}
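/*
 * mmc_add_disk() publishes the gendisk and then hangs a handful of sysfs
 * attributes off it: "force_ro", optionally "ro_lock_until_next_power_on"
 * for power-on write-protectable boot areas, the packing controls
 * "num_wr_reqs_to_start_packing" and "no_pack_for_random", plus the
 * max_read_speed/max_write_speed/cache_size throttling knobs when
 * CONFIG_MMC_SIMULATE_MAX_SPEED is set.  They normally appear under the
 * disk's sysfs directory, so (path shown only as an example)
 *
 *	echo 1 > /sys/block/mmcblk0/force_ro
 *
 * is the intended way to force the device read-only from user space.
 */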
3380
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003381static const struct mmc_fixup blk_fixups[] =
3382{
Chris Ballc59d4472011-11-11 22:01:43 -05003383 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
3384 MMC_QUIRK_INAND_CMD38),
3385 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
3386 MMC_QUIRK_INAND_CMD38),
3387 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
3388 MMC_QUIRK_INAND_CMD38),
3389 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
3390 MMC_QUIRK_INAND_CMD38),
3391 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3392 MMC_QUIRK_INAND_CMD38),
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003393
3394 /*
3395 * Some MMC cards experience performance degradation with CMD23
3396 * instead of CMD12-bounded multiblock transfers. For now we'll
 3397 	 * blacklist what's bad...
3398 * - Certain Toshiba cards.
3399 *
3400 * N.B. This doesn't affect SD cards.
3401 */
Yangbo Lu7d70d472015-07-10 11:44:03 +08003402 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
3403 MMC_QUIRK_BLK_NO_CMD23),
3404 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
3405 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003406 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003407 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003408 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003409 MMC_QUIRK_BLK_NO_CMD23),
Chris Ballc59d4472011-11-11 22:01:43 -05003410 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
Andrei Warkentind0c97cf2011-05-23 15:06:36 -05003411 MMC_QUIRK_BLK_NO_CMD23),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003412
3413 /*
Matt Gumbel32ecd322016-05-20 10:33:46 +03003414	 * Some MMC cards need a longer data read timeout than indicated in CSD.
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003415 */
Chris Ballc59d4472011-11-11 22:01:43 -05003416 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003417 MMC_QUIRK_LONG_READ_TIME),
Matt Gumbel32ecd322016-05-20 10:33:46 +03003418 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3419 MMC_QUIRK_LONG_READ_TIME),
Stefan Nilsson XK6de5fc92011-11-03 09:44:12 +01003420
Ian Chen3550ccd2012-08-29 15:05:36 +09003421 /*
Guoping Yu3c984a92014-08-06 12:44:55 +08003422	 * Some Samsung MMC cards need a longer data read timeout than
3423 * indicated in CSD.
3424 */
3425 MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
3426 MMC_QUIRK_LONG_READ_TIME),
3427
3428 /*
Ian Chen3550ccd2012-08-29 15:05:36 +09003429 * On these Samsung MoviNAND parts, performing secure erase or
3430 * secure trim can result in unrecoverable corruption due to a
3431 * firmware bug.
3432 */
3433 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3434 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3435 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3436 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3437 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3438 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3439 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3440 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3441 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3442 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3443 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3444 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3445 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3446 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3447 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3448 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3449
Shawn Linb5b4ff02015-08-12 13:08:32 +08003450 /*
 3451 	 * On some Kingston eMMCs, performing trim can occasionally result in
 3452 	 * unrecoverable data corruption due to a firmware bug.
3453 */
3454 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
3455 MMC_QUIRK_TRIM_BROKEN),
3456 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
3457 MMC_QUIRK_TRIM_BROKEN),
3458
Pratibhasagar V8d664e32014-12-03 18:26:42 +02003459 /* Some INAND MCP devices advertise incorrect timeout values */
3460 MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
3461 MMC_QUIRK_INAND_DATA_TIMEOUT),
3462
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003463 END_FIXUP
3464};
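/*
 * The table above is consumed by mmc_fixup_device(card, blk_fixups) in
 * mmc_blk_probe() below: each MMC_FIXUP() entry matches on the CID name and
 * manufacturer/OEM IDs, and the add_quirk()/add_quirk_mmc() hooks set the
 * corresponding MMC_QUIRK_* bit on the card so the request paths can
 * special-case the affected devices.  mmc_fixup_device() itself lives in the
 * core, so the matching details given here are a summary, not a definition.
 */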
3465
Ulf Hansson96541ba2015-04-14 13:06:12 +02003466static int mmc_blk_probe(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003468 struct mmc_blk_data *md, *part_md;
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003469 char cap_str[10];
3470
Pierre Ossman912490d2005-05-21 10:27:02 +01003471 /*
3472 * Check that the card supports the command class(es) we need.
3473 */
3474 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 return -ENODEV;
3476
Lukas Czerner5204d002014-06-18 13:18:07 +02003477 mmc_fixup_device(card, blk_fixups);
3478
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 md = mmc_blk_alloc(card);
3480 if (IS_ERR(md))
3481 return PTR_ERR(md);
3482
James Bottomleyb9f28d82015-03-05 18:47:01 -08003483 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003484 cap_str, sizeof(cap_str));
Girish K Sa3c76eb2011-10-11 11:44:09 +05303485 pr_info("%s: %s %s %s %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
Pierre Ossmana7bbb572008-09-06 10:57:57 +02003487 cap_str, md->read_only ? "(ro)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
Andrei Warkentin371a6892011-04-11 18:10:25 -05003489 if (mmc_blk_alloc_parts(card, md))
3490 goto out;
3491
Ulf Hansson96541ba2015-04-14 13:06:12 +02003492 dev_set_drvdata(&card->dev, md);
Andrei Warkentin6f60c222011-04-11 19:11:04 -04003493
Andrei Warkentin371a6892011-04-11 18:10:25 -05003494 if (mmc_add_disk(md))
3495 goto out;
3496
3497 list_for_each_entry(part_md, &md->part, part) {
3498 if (mmc_add_disk(part_md))
3499 goto out;
3500 }
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003501
3502 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
3503 pm_runtime_use_autosuspend(&card->dev);
3504
3505 /*
3506 * Don't enable runtime PM for SD-combo cards here. Leave that
3507 * decision to be taken during the SDIO init sequence instead.
3508 */
3509 if (card->type != MMC_TYPE_SD_COMBO) {
3510 pm_runtime_set_active(&card->dev);
3511 pm_runtime_enable(&card->dev);
3512 }
3513
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 return 0;
3515
3516 out:
Andrei Warkentin371a6892011-04-11 18:10:25 -05003517 mmc_blk_remove_parts(card, md);
3518 mmc_blk_remove_req(md);
Ulf Hansson5865f282012-03-22 11:47:26 +01003519 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520}
3521
Ulf Hansson96541ba2015-04-14 13:06:12 +02003522static void mmc_blk_remove(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003524 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525
Andrei Warkentin371a6892011-04-11 18:10:25 -05003526 mmc_blk_remove_parts(card, md);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003527 pm_runtime_get_sync(&card->dev);
Adrian Hunterddd6fa72011-06-23 13:40:26 +03003528 mmc_claim_host(card->host);
3529 mmc_blk_part_switch(card, md);
3530 mmc_release_host(card->host);
Ulf Hanssone94cfef2013-05-02 14:02:38 +02003531 if (card->type != MMC_TYPE_SD_COMBO)
3532 pm_runtime_disable(&card->dev);
3533 pm_runtime_put_noidle(&card->dev);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003534 mmc_blk_remove_req(md);
Ulf Hansson96541ba2015-04-14 13:06:12 +02003535 dev_set_drvdata(&card->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536}
3537
Ulf Hansson96541ba2015-04-14 13:06:12 +02003538static int _mmc_blk_suspend(struct mmc_card *card)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003540 struct mmc_blk_data *part_md;
Ulf Hansson96541ba2015-04-14 13:06:12 +02003541 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303542 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543
3544 if (md) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05303545 rc = mmc_queue_suspend(&md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303546 if (rc)
3547 goto out;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003548 list_for_each_entry(part_md, &md->part, part) {
Subhash Jadavani4893b392013-06-20 18:15:50 +05303549 rc = mmc_queue_suspend(&part_md->queue, 0);
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303550 if (rc)
3551 goto out_resume;
Andrei Warkentin371a6892011-04-11 18:10:25 -05003552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 }
Subhash Jadavani5cd341a2013-02-26 17:32:58 +05303554 goto out;
3555
3556 out_resume:
3557 mmc_queue_resume(&md->queue);
3558 list_for_each_entry(part_md, &md->part, part) {
3559 mmc_queue_resume(&part_md->queue);
3560 }
3561 out:
3562 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563}
3564
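/*
 * Shutdown reuses the suspend path above so that the request queues are
 * quiesced before the card loses power; the return value of
 * _mmc_blk_suspend() is not checked here since there is nothing left to do
 * with it at shutdown time.
 */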
Ulf Hansson96541ba2015-04-14 13:06:12 +02003565static void mmc_blk_shutdown(struct mmc_card *card)
Ulf Hansson76287742013-06-10 17:03:40 +02003566{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003567 _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02003568}
3569
Ulf Hansson0967edc2014-10-06 11:29:42 +02003570#ifdef CONFIG_PM_SLEEP
3571static int mmc_blk_suspend(struct device *dev)
Ulf Hansson76287742013-06-10 17:03:40 +02003572{
Ulf Hansson96541ba2015-04-14 13:06:12 +02003573 struct mmc_card *card = mmc_dev_to_card(dev);
3574
3575 return _mmc_blk_suspend(card);
Ulf Hansson76287742013-06-10 17:03:40 +02003576}
3577
Ulf Hansson0967edc2014-10-06 11:29:42 +02003578static int mmc_blk_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579{
Andrei Warkentin371a6892011-04-11 18:10:25 -05003580 struct mmc_blk_data *part_md;
Ulf Hanssonfc95e302014-10-06 14:34:09 +02003581 struct mmc_blk_data *md = dev_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582
3583 if (md) {
Andrei Warkentin371a6892011-04-11 18:10:25 -05003584 /*
3585 * Resume involves the card going into idle state,
 3586 		 * so the current partition is always the main one.
3587 */
3588 md->part_curr = md->part_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 mmc_queue_resume(&md->queue);
Andrei Warkentin371a6892011-04-11 18:10:25 -05003590 list_for_each_entry(part_md, &md->part, part) {
3591 mmc_queue_resume(&part_md->queue);
3592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 }
3594 return 0;
3595}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596#endif
3597
Ulf Hansson0967edc2014-10-06 11:29:42 +02003598static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
3599
Ulf Hansson96541ba2015-04-14 13:06:12 +02003600static struct mmc_driver mmc_driver = {
3601 .drv = {
3602 .name = "mmcblk",
3603 .pm = &mmc_blk_pm_ops,
3604 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 .probe = mmc_blk_probe,
3606 .remove = mmc_blk_remove,
Ulf Hansson76287742013-06-10 17:03:40 +02003607 .shutdown = mmc_blk_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608};
3609
3610static int __init mmc_blk_init(void)
3611{
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003612 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003614 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3615 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3616
Ben Hutchingsa26eba62014-11-06 03:35:09 +00003617 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
Olof Johansson5e71b7a2010-09-17 21:19:57 -04003618
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003619 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3620 if (res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003623 res = mmc_register_driver(&mmc_driver);
3624 if (res)
3625 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626
Akinobu Mita9d4e98e2008-09-13 19:02:07 +09003627 return 0;
3628 out2:
3629 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630 out:
3631 return res;
3632}
3633
3634static void __exit mmc_blk_exit(void)
3635{
3636 mmc_unregister_driver(&mmc_driver);
Pierre Ossmanfe6b4c82007-05-14 17:27:29 +02003637 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638}
3639
3640module_init(mmc_blk_init);
3641module_exit(mmc_blk_exit);
3642
3643MODULE_LICENSE("GPL");
3644MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3645