/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

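/*
 * A card supports reliable writes if it is an MMC device that either
 * advertises the enhanced reliable write mode in EXT_CSD or reports a
 * legacy reliable write sector count.
 */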
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
	 ((card)->ext_csd.rel_sectors)))

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;
	unsigned int	read_only;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

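/*
 * Look up the mmc_blk_data for a gendisk and take a reference to it.
 * Returns NULL if the device is going away (usage count already zero).
 * The reference is dropped with mmc_blk_put().
 */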
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devmaj = MAJOR(disk_devt(md->disk));
		int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;

		if (!devmaj)
			devidx = md->disk->first_minor / perdev_minors;

		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}

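/*
 * Report a fake CHS geometry (16 sectors, 4 heads) for the benefit of
 * legacy partitioning tools; the card itself has no real geometry.
 */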
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};

struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

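/*
 * Query an SD card for the number of blocks it wrote successfully
 * (ACMD22, SEND_NUM_WR_BLKS), used to complete the known-good part of
 * a failed write.  Returns (u32)-1 on any error.
 */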
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

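/* Read the card's status register via CMD13 (SEND_STATUS). */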
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}

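/*
 * Service a REQ_DISCARD request by erasing (or, when supported,
 * trimming) the affected sectors.
 */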
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

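/*
 * Service a secure discard request.  Ranges that are not aligned to an
 * erase group use the two-step secure trim (TRIM1 then TRIM2); aligned
 * ranges use a plain secure erase.
 */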
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG)
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;

	/*
	 * No-op, only service this because we need REQ_FUA for reliable
	 * writes.
	 */
	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, 0);
	spin_unlock_irq(&md->lock);

	return 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
				   struct mmc_card *card,
				   struct request *req)
{
	int err;
	struct mmc_command set_count;

	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}

	memset(&set_count, 0, sizeof(struct mmc_command));
	set_count.opcode = MMC_SET_BLOCK_COUNT;
	set_count.arg = brq->data.blocks | (1 << 31);
	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &set_count, 0);
	if (err)
		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
		       req->rq_disk->disk_name, err);
	return err;
}

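/*
 * Issue a normal read/write request: build an mmc_blk_request covering
 * as many sectors as the host allows, run it, and complete the request
 * in chunks until all sectors are transferred.  On read errors the
 * transfer is retried one sector at a time to salvage what it can.
 */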
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
			 (rq_data_dir(req) == WRITE) &&
			 REL_WRITES_SUPPORTED(card);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);

		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1 || do_rel_wr) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request. Reliable
			 * writes use SET_BLOCK_COUNT and do not use a
			 * STOP_TRANSMISSION request either.
			 */
			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
			    rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
			goto cmd_err;

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}

		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				 (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}

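/*
 * Dispatch a block layer request to the discard, secure discard, flush
 * or read/write handler while holding a claim on the host.
 */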
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	mmc_claim_host(card->host);

	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req->cmd_flags & REQ_FLUSH) {
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

	mmc_release_host(card->host);
	return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

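/*
 * Allocate and initialise the per-card block device state: pick a free
 * device index, set up the request queue and the gendisk, and size the
 * disk from the CSD or EXT_CSD capacity.
 */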
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;
	set_disk_ro(md->disk, md->read_only);
	if (REL_WRITES_SUPPORTED(card))
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d", devidx);

	blk_queue_logical_block_size(md->queue.queue, 512);

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

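/* Tell the card to use the 512 byte block size this driver assumes. */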
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
		       md->disk->disk_name, err);
		return -EINVAL;
	}

	return 0;
}

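/*
 * Called when the core binds a new card to this driver: create the
 * block device, set the block size and announce the disk.
 */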
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);

	return err;
}

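/* Called when the card goes away; tear down the disk and the queue. */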
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");