blob: ba821fe70bca03dd3fbf17661e150ff737af242c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Pierre Ossmanaaac1b42007-02-28 15:33:10 +01002 * linux/drivers/mmc/core/core.c
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
Pierre Ossman5b4fd9a2005-09-06 15:18:56 -07005 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
Pierre Ossmanad3868b2008-06-28 12:52:45 +02006 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
Philip Langdalebce40a32006-10-21 12:35:02 +02007 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/pagemap.h>
20#include <linux/err.h>
Pierre Ossmanaf8350c2007-09-24 07:15:48 +020021#include <linux/leds.h>
Pierre Ossmanb57c43a2005-09-06 15:18:53 -070022#include <linux/scatterlist.h>
Anton Vorontsov86e82862008-11-26 22:54:17 +030023#include <linux/log2.h>
David Brownell5c139412009-03-11 03:30:43 -080024#include <linux/regulator/consumer.h>
Ohad Ben-Cohene5945732010-11-28 07:21:30 +020025#include <linux/pm_runtime.h>
Amerigo Wang35eb6db2011-07-25 17:13:11 -070026#include <linux/suspend.h>
Per Forlin1b676f72011-08-19 14:52:37 +020027#include <linux/fault-inject.h>
28#include <linux/random.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30#include <linux/mmc/card.h>
31#include <linux/mmc/host.h>
Pierre Ossmanda7fbe52006-12-24 22:46:55 +010032#include <linux/mmc/mmc.h>
33#include <linux/mmc/sd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
Pierre Ossmanaaac1b42007-02-28 15:33:10 +010035#include "core.h"
Pierre Ossmanffce2e72007-05-19 14:32:22 +020036#include "bus.h"
37#include "host.h"
Pierre Ossmane29a7d72007-05-26 13:48:18 +020038#include "sdio_bus.h"
Pierre Ossmanda7fbe52006-12-24 22:46:55 +010039
40#include "mmc_ops.h"
41#include "sd_ops.h"
Pierre Ossman5c4e6f12007-05-21 20:23:20 +020042#include "sdio_ops.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Pierre Ossmanffce2e72007-05-19 14:32:22 +020044static struct workqueue_struct *workqueue;
45
46/*
David Brownellaf517152007-08-08 09:11:32 -070047 * Enabling software CRCs on the data blocks can be a significant (30%)
48 * performance cost, and for other reasons may not always be desired.
49 * So we allow it to be disabled.
50 */
Rusty Russell90ab5ee2012-01-13 09:32:20 +103051bool use_spi_crc = 1;
David Brownellaf517152007-08-08 09:11:32 -070052module_param(use_spi_crc, bool, 0);
53
54/*
Ben Hutchingsbd68e082009-12-14 18:01:29 -080055 * We normally treat cards as removed during suspend if they are not
56 * known to be on a non-removable bus, to avoid the risk of writing
57 * back data to a different card after resume. Allow this to be
58 * overridden if necessary.
59 */
60#ifdef CONFIG_MMC_UNSAFE_RESUME
Rusty Russell90ab5ee2012-01-13 09:32:20 +103061bool mmc_assume_removable;
Ben Hutchingsbd68e082009-12-14 18:01:29 -080062#else
Rusty Russell90ab5ee2012-01-13 09:32:20 +103063bool mmc_assume_removable = 1;
Ben Hutchingsbd68e082009-12-14 18:01:29 -080064#endif
Matt Fleming71d7d3d2010-09-27 09:42:19 +010065EXPORT_SYMBOL(mmc_assume_removable);
Ben Hutchingsbd68e082009-12-14 18:01:29 -080066module_param_named(removable, mmc_assume_removable, bool, 0644);
67MODULE_PARM_DESC(
68 removable,
69 "MMC/SD cards are removable and may be removed during suspend");
70
71/*
Pierre Ossmanffce2e72007-05-19 14:32:22 +020072 * Internal function. Schedule delayed work in the MMC work queue.
73 */
74static int mmc_schedule_delayed_work(struct delayed_work *work,
75 unsigned long delay)
76{
77 return queue_delayed_work(workqueue, work, delay);
78}
79
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 * Blocks until every work item queued so far has finished executing.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
87
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 *
 * Only data transfers that completed without a command or data error
 * are eligible; should_fail() then decides (per the fault-injection
 * configuration) whether to fabricate an error on this request.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	/*
	 * Report a random number of whole 512-byte sectors as
	 * transferred.  Guard against transfers shorter than one
	 * sector: there "bytes_xfered >> 9" is zero and the modulo
	 * would be a division by zero.
	 */
	if (data->bytes_xfered >> 9)
		data->bytes_xfered =
			(random32() % (data->bytes_xfered >> 9)) << 9;
	else
		data->bytes_xfered = 0;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

/* Stub when fault injection is compiled out. */
static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
124
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which request
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/*
	 * In SPI mode an "illegal command" response means retrying the
	 * same command cannot succeed, so cancel any remaining retries.
	 */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		/* May inject a fake error when CONFIG_FAIL_MMC_REQUEST is set. */
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		/* Drop the clock reference taken in mmc_start_request(). */
		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
182
/*
 * Internal function.  Hand @mrq to the host driver.  The caller must
 * hold the host claim (checked via WARN_ON below); the clock reference
 * taken here is released in mmc_request_done().
 */
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		/* The transfer must fit within the host's advertised limits. */
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		/* Scatterlist length must match the declared transfer size. */
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	/* Released in mmc_request_done() when the request finishes. */
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}
246
/* Request-done callback for synchronous requests: wake the waiter. */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
251
/*
 * Internal function.  Start @mrq on @host with a completion-based done
 * callback.  If the card has been removed the request is failed at once
 * with -ENOMEDIUM (and the completion is signalled so waiters return).
 * Returns 0 if the request was started, -ENOMEDIUM otherwise.
 */
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		/* Complete immediately so mmc_wait_for_req_done() won't block. */
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
264
/*
 * Internal function.  Wait for @mrq to complete; if the command failed
 * and has retries left (and the card is still present), resend the
 * request and wait again until it succeeds or retries are exhausted.
 */
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		/* Resend directly; the completion is reused for the retry. */
		host->ops->request(host, mrq);
	}
}
285
286/**
287 * mmc_pre_req - Prepare for a new request
288 * @host: MMC host to prepare command
289 * @mrq: MMC request to prepare for
290 * @is_first_req: true if there is no previous started request
291 * that may run in parellel to this call, otherwise false
292 *
293 * mmc_pre_req() is called in prior to mmc_start_req() to let
294 * host prepare for the new request. Preparation of a request may be
295 * performed while another request is running on the host.
296 */
297static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
298 bool is_first_req)
299{
Sujit Reddy Thumma2c4967f742012-02-04 16:14:50 -0500300 if (host->ops->pre_req) {
301 mmc_host_clk_hold(host);
Per Forlinaa8b6832011-07-01 18:55:22 +0200302 host->ops->pre_req(host, mrq, is_first_req);
Sujit Reddy Thumma2c4967f742012-02-04 16:14:50 -0500303 mmc_host_clk_release(host);
304 }
Per Forlinaa8b6832011-07-01 18:55:22 +0200305}
306
307/**
308 * mmc_post_req - Post process a completed request
309 * @host: MMC host to post process command
310 * @mrq: MMC request to post process for
311 * @err: Error, if non zero, clean up any resources made in pre_req
312 *
313 * Let the host post process a completed request. Post processing of
314 * a request may be performed while another reuqest is running.
315 */
316static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
317 int err)
318{
Sujit Reddy Thumma2c4967f742012-02-04 16:14:50 -0500319 if (host->ops->post_req) {
320 mmc_host_clk_hold(host);
Per Forlinaa8b6832011-07-01 18:55:22 +0200321 host->ops->post_req(host, mrq, err);
Sujit Reddy Thumma2c4967f742012-02-04 16:14:50 -0500322 mmc_host_clk_release(host);
323 }
Per Forlinaa8b6832011-07-01 18:55:22 +0200324}
325
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request wait for completion
 *	of that request and start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	/* Wait for the previously started request and check its result. */
	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
	}

	/* Only start the new request if the previous one succeeded. */
	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	/* Let the driver clean up the request that just finished. */
	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
378
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	/* On a removed card this sets cmd->error and completes immediately. */
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
394
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and poll the card status
 *	until the card leaves the programming (PRG) state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	/* HPI must have been enabled in EXT_CSD during card init. */
	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
451
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	/* Command-only request: no data or stop phase. */
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
480
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.  The result is stored in
 *	@data->timeout_ns and @data->timeout_clks.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		/* Fold the clock-count part into microseconds, if the rate is known. */
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
578
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size: round up to the next multiple of four.
	 */
	return (sz + 3) & ~(unsigned int)3;
}
604EXPORT_SYMBOL(mmc_align_data_size);
605
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	/* Sleep on host->wq until unclaimed, re-entered, or aborted. */
	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		/* Proceed if aborted, unclaimed, or already claimed by us. */
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		/* Claims nest: record the owning task and bump the count. */
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		/* We gave up; let the next waiter have a go. */
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	/* Notify the driver only on the first (outermost) claim. */
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Non-blocking variant of __mmc_claim_host().
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	/* Succeeds when unclaimed or already claimed by this task (nesting). */
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	/* Notify the driver only on the first (outermost) claim. */
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
Adrian Hunter8ea926b2009-09-22 16:44:29 -0700675
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	/* Notify the driver before the last (outermost) claim is dropped. */
	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		/* Wake any task sleeping in __mmc_claim_host(). */
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
704
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	/* A non-zero clock counts as ungating (see mmc_set_ungated()). */
	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}
723
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	/* Hold the host clock across the ios update. */
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
734
735/*
Pierre Ossman7ea239d2006-12-31 00:11:32 +0100736 * Sets the host clock to the highest possible frequency that
737 * is below "hz".
738 */
Mika Westerberg778e2772011-08-18 15:23:48 +0300739static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
Pierre Ossman7ea239d2006-12-31 00:11:32 +0100740{
741 WARN_ON(hz < host->f_min);
742
743 if (hz > host->f_max)
744 hz = host->f_max;
745
746 host->ios.clock = hz;
747 mmc_set_ios(host);
748}
749
/* Public wrapper: hold the host clock around __mmc_set_clock(). */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
756
Linus Walleij04566832010-11-08 21:36:50 -0500757#ifdef CONFIG_MMC_CLKGATE
758/*
759 * This gates the clock by setting it to 0 Hz.
760 */
761void mmc_gate_clock(struct mmc_host *host)
762{
763 unsigned long flags;
764
765 spin_lock_irqsave(&host->clk_lock, flags);
766 host->clk_old = host->ios.clock;
767 host->ios.clock = 0;
768 host->clk_gated = true;
769 spin_unlock_irqrestore(&host->clk_lock, flags);
770 mmc_set_ios(host);
771}
772
773/*
774 * This restores the clock from gating by using the cached
775 * clock value.
776 */
777void mmc_ungate_clock(struct mmc_host *host)
778{
779 /*
780 * We should previously have gated the clock, so the clock shall
781 * be 0 here! The clock may however be 0 during initialization,
782 * when some request operations are performed before setting
783 * the frequency. When ungate is requested in that situation
784 * we just ignore the call.
785 */
786 if (host->clk_old) {
787 BUG_ON(host->ios.clock);
788 /* This call will also set host->clk_gated to false */
Mika Westerberg778e2772011-08-18 15:23:48 +0300789 __mmc_set_clock(host, host->clk_old);
Linus Walleij04566832010-11-08 21:36:50 -0500790 }
791}
792
/*
 * Mark the host clock as ungated without touching the hardware.
 * Used when a new frequency is programmed while the clock was gated,
 * so the gating state machine stays consistent.
 */
void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
805
806#else
/* Clock gating disabled (no CONFIG_MMC_CLKGATE): nothing to track. */
void mmc_set_ungated(struct mmc_host *host)
{
}
810#endif
811
/*
 * Change the bus mode (open drain/push-pull) of a host.
 * The ios update is bracketed by a host-clock hold/release so the
 * controller sees it while ungated.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
822
/*
 * Change data bus width of a host.
 * The ios update is bracketed by a host-clock hold/release so the
 * controller sees it while ungated.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
833
Anton Vorontsov86e82862008-11-26 22:54:17 +0300834/**
835 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
836 * @vdd: voltage (mV)
837 * @low_bits: prefer low bits in boundary cases
838 *
839 * This function returns the OCR bit number according to the provided @vdd
840 * value. If conversion is not possible a negative errno value returned.
841 *
842 * Depending on the @low_bits flag the function prefers low or high OCR bits
843 * on boundary voltages. For example,
844 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
845 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
846 *
847 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
848 */
849static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
850{
851 const int max_bit = ilog2(MMC_VDD_35_36);
852 int bit;
853
854 if (vdd < 1650 || vdd > 3600)
855 return -EINVAL;
856
857 if (vdd >= 1650 && vdd <= 1950)
858 return ilog2(MMC_VDD_165_195);
859
860 if (low_bits)
861 vdd -= 1;
862
863 /* Base 2000 mV, step 100 mV, bit's base 8. */
864 bit = (vdd - 2000) / 100 + 8;
865 if (bit > max_bit)
866 return max_bit;
867 return bit;
868}
869
870/**
871 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
872 * @vdd_min: minimum voltage value (mV)
873 * @vdd_max: maximum voltage value (mV)
874 *
875 * This function returns the OCR mask bits according to the provided @vdd_min
876 * and @vdd_max values. If conversion is not possible the function returns 0.
877 *
878 * Notes wrt boundary cases:
879 * This function sets the OCR bits for all boundary voltages, for example
880 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
881 * MMC_VDD_34_35 mask.
882 */
883u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
884{
885 u32 mask = 0;
886
887 if (vdd_max < vdd_min)
888 return 0;
889
890 /* Prefer high bits for the boundary vdd_max values. */
891 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
892 if (vdd_max < 0)
893 return 0;
894
895 /* Prefer low bits for the boundary vdd_min values. */
896 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
897 if (vdd_min < 0)
898 return 0;
899
900 /* Fill the mask, from max bit to min bit. */
901 while (vdd_max >= vdd_min)
902 mask |= 1 << vdd_max--;
903
904 return mask;
905}
906EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
907
David Brownell5c139412009-03-11 03:30:43 -0800908#ifdef CONFIG_REGULATOR
909
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * Returns either a negative errno, or the OCR mask of voltages this
 * regulator can deliver to MMC/SD/SDIO devices. Normally called
 * before registering the MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int mask = 0;
	int count, i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	/* Fold every selectable regulator step into the OCR mask. */
	for (i = 0; i < count; i++) {
		int uV = regulator_list_voltage(supply, i);

		if (uV <= 0)
			continue;

		mask |= mmc_vddrange_to_ocrmask(uV / 1000, uV / 1000);
	}

	return mask;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
944
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			/* Bit 7 is the dedicated 1.65-1.95V window. */
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			/* Bits 8+ are 100 mV steps starting at 2.0V. */
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);

		/* Quirk: keep the current voltage if it can't be changed. */
		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		/* Enable only once; regulator_enable() is refcounted. */
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		/* vdd_bit == 0 means power off the supply. */
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
1014
Linus Walleij99fc5132010-09-29 01:08:27 -04001015#endif /* CONFIG_REGULATOR */
David Brownell5c139412009-03-11 03:30:43 -08001016
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001017/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 * Mask off any voltages we don't support and select
1019 * the lowest voltage
1020 */
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001021u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022{
1023 int bit;
1024
1025 ocr &= host->ocr_avail;
1026
1027 bit = ffs(ocr);
1028 if (bit) {
1029 bit -= 1;
1030
Timo Teras63ef7312006-11-02 19:43:27 +01001031 ocr &= 3 << bit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032
Mika Westerberg778e2772011-08-18 15:23:48 +03001033 mmc_host_clk_hold(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 host->ios.vdd = bit;
Russell King920e70c2006-05-04 18:22:51 +01001035 mmc_set_ios(host);
Mika Westerberg778e2772011-08-18 15:23:48 +03001036 mmc_host_clk_release(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 } else {
David Brownellf6e10b82008-12-31 09:50:30 -08001038 pr_warning("%s: host doesn't support card's voltages\n",
1039 mmc_hostname(host));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040 ocr = 0;
1041 }
1042
1043 return ocr;
1044}
1045
/*
 * Switch the host (and, via CMD11, optionally the card) to a new
 * signalling voltage. When @cmd11 is set and the target is not 3.3V,
 * SD_SWITCH_VOLTAGE is issued first so the card starts its own
 * voltage switch sequence. Returns 0 or a negative errno.
 */
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		/* Card must not report an error to the switch request. */
		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	/* Let the host controller perform its side of the switch. */
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}
1080
/*
 * Select timing parameters for host.
 * The ios update is bracketed by a host-clock hold/release so the
 * controller sees it while ungated.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
1091
/*
 * Select appropriate driver type for host.
 * The ios update is bracketed by a host-clock hold/release so the
 * controller sees it while ungated.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
1102
/*
 * Tell an eMMC card that power is about to be removed by writing
 * EXT_CSD_POWER_OFF_NOTIFICATION (short or long form, depending on
 * the host's power_notify_type). Only acts on MMC cards whose notify
 * state is MMC_POWERED_ON; afterwards the card state is reset to
 * MMC_NO_POWER_NOTIFICATION regardless of the switch outcome.
 */
static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;
	mmc_claim_host(host);

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		/* Pick the notification flavour and its matching timeout. */
		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		/* -EBADMSG is tolerated; anything else means the card
		 * did not acknowledge in time and we power off anyway. */
		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
	mmc_release_host(host);
}
1144
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stablising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;	/* highest supported voltage */

	host->ios.vdd = bit;
	/* SPI needs chip select asserted during init; native MMC doesn't. */
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock sizes, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}
1198
/*
 * Remove power from the card. Wakes a sleeping eMMC 4.5 card first so
 * it can receive the poweroff notification, resets the cached OCR to
 * the host's highest supported voltage for the next power up, and
 * finally drives the bus into its powered-down state.
 */
void mmc_power_off(struct mmc_host *host)
{
	int err = 0;
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * For eMMC 4.5 device send AWAKE command before
	 * POWER_OFF_NOTIFY command, because in sleep state
	 * eMMC 4.5 devices respond to only RESET and AWAKE cmd
	 */
	if (host->card && mmc_card_is_sleep(host->card) &&
	    host->bus_ops->resume) {
		err = host->bus_ops->resume(host);

		if (!err)
			mmc_poweroff_notify(host);
		else
			/* Best effort: continue powering down regardless. */
			pr_warning("%s: error %d during resume "
				   "(continue with poweroff sequence)\n",
				   mmc_hostname(host), err);
	}

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}
1248
/*
 * Cleanup when the last reference to the bus operator is dropped.
 * Called from mmc_bus_put() with host->lock held; the bus must
 * already be marked dead and have no remaining references.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
1260
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	/* host->lock protects the bus_refs counter. */
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
1272
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	/* Last reference gone and a handler still attached: release it. */
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
1287
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time. The caller must have claimed the host, and no
 * handler may already be attached.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	/* A handler must not already be attached or referenced. */
	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
1312
/*
 * Remove the current bus handler from a host. Marks the bus dead and
 * drops the initial reference taken in mmc_attach_bus(); the handler
 * itself is freed once the last reference goes via mmc_bus_put().
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
1333
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	/* Debug builds verify the host has not already been removed. */
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	/* Defer the actual rescan to the detect workqueue. */
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
1357
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001358void mmc_init_erase(struct mmc_card *card)
1359{
1360 unsigned int sz;
1361
1362 if (is_power_of_2(card->erase_size))
1363 card->erase_shift = ffs(card->erase_size) - 1;
1364 else
1365 card->erase_shift = 0;
1366
1367 /*
1368 * It is possible to erase an arbitrarily large area of an SD or MMC
1369 * card. That is not desirable because it can take a long time
1370 * (minutes) potentially delaying more important I/O, and also the
1371 * timeout calculations become increasingly hugely over-estimated.
1372 * Consequently, 'pref_erase' is defined as a guide to limit erases
1373 * to that size and alignment.
1374 *
1375 * For SD cards that define Allocation Unit size, limit erases to one
1376 * Allocation Unit at a time. For MMC cards that define High Capacity
1377 * Erase Size, whether it is switched on or not, limit to that size.
1378 * Otherwise just have a stab at a good value. For modern cards it
1379 * will end up being 4MiB. Note that if the value is too small, it
1380 * can end up taking longer to erase.
1381 */
1382 if (mmc_card_sd(card) && card->ssr.au) {
1383 card->pref_erase = card->ssr.au;
1384 card->erase_shift = ffs(card->ssr.au) - 1;
1385 } else if (card->ext_csd.hc_erase_size) {
1386 card->pref_erase = card->ext_csd.hc_erase_size;
1387 } else {
1388 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1389 if (sz < 128)
1390 card->pref_erase = 512 * 1024 / 512;
1391 else if (sz < 512)
1392 card->pref_erase = 1024 * 1024 / 512;
1393 else if (sz < 1024)
1394 card->pref_erase = 2 * 1024 * 1024 / 512;
1395 else
1396 card->pref_erase = 4 * 1024 * 1024 / 512;
1397 if (card->pref_erase < card->erase_size)
1398 card->pref_erase = card->erase_size;
1399 else {
1400 sz = card->pref_erase % card->erase_size;
1401 if (sz)
1402 card->pref_erase += card->erase_size - sz;
1403 }
1404 }
1405}
1406
/*
 * Compute the erase/trim/discard timeout (in ms) for an MMC card,
 * for @qty erase groups and the given erase argument @arg.
 */
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	/* Discard — and trim on rev-6+ cards — use the trim timeout. */
	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	/* Timeout scales with the number of erase groups affected. */
	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
1470
Andrei Warkentineaa02f72011-04-11 16:13:41 -05001471static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1472 unsigned int arg,
1473 unsigned int qty)
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001474{
Andrei Warkentineaa02f72011-04-11 16:13:41 -05001475 unsigned int erase_timeout;
1476
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001477 if (card->ssr.erase_timeout) {
1478 /* Erase timeout specified in SD Status Register (SSR) */
Andrei Warkentineaa02f72011-04-11 16:13:41 -05001479 erase_timeout = card->ssr.erase_timeout * qty +
1480 card->ssr.erase_offset;
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001481 } else {
1482 /*
1483 * Erase timeout not specified in SD Status Register (SSR) so
1484 * use 250ms per write block.
1485 */
Andrei Warkentineaa02f72011-04-11 16:13:41 -05001486 erase_timeout = 250 * qty;
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001487 }
1488
1489 /* Must not be less than 1 second */
Andrei Warkentineaa02f72011-04-11 16:13:41 -05001490 if (erase_timeout < 1000)
1491 erase_timeout = 1000;
1492
1493 return erase_timeout;
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001494}
1495
/*
 * Dispatch erase-timeout calculation to the SD or MMC variant
 * depending on the card type.
 */
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	return mmc_card_sd(card) ? mmc_sd_erase_timeout(card, arg, qty)
				 : mmc_mmc_erase_timeout(card, arg, qty);
}
1505
/*
 * Issue the three-command erase sequence (group start, group end,
 * erase) for sectors [from, to] and then poll SEND_STATUS until the
 * card leaves the programming state. Returns 0 or a negative errno.
 */
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	/* Byte-addressed cards take byte offsets, not sector numbers. */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	/* Step 1: set the first sector/group of the erase range. */
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	/* Step 2: set the last sector/group of the erase range. */
	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	/* Step 3: trigger the erase itself; @arg selects erase/trim/etc. */
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	/* SPI hosts cannot poll card status; we are done. */
	if (mmc_host_is_spi(card->host))
		goto out;

	/* Poll until the card is ready for data and out of PRG state. */
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}
1606
1607/**
1608 * mmc_erase - erase sectors.
1609 * @card: card to erase
1610 * @from: first sector to erase
1611 * @nr: number of sectors to erase
1612 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1613 *
1614 * Caller must claim host before calling this function.
1615 */
1616int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1617 unsigned int arg)
1618{
1619 unsigned int rem, to = from + nr;
1620
1621 if (!(card->host->caps & MMC_CAP_ERASE) ||
1622 !(card->csd.cmdclass & CCC_ERASE))
1623 return -EOPNOTSUPP;
1624
1625 if (!card->erase_size)
1626 return -EOPNOTSUPP;
1627
1628 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1629 return -EOPNOTSUPP;
1630
1631 if ((arg & MMC_SECURE_ARGS) &&
1632 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1633 return -EOPNOTSUPP;
1634
1635 if ((arg & MMC_TRIM_ARGS) &&
1636 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1637 return -EOPNOTSUPP;
1638
1639 if (arg == MMC_SECURE_ERASE_ARG) {
1640 if (from % card->erase_size || nr % card->erase_size)
1641 return -EINVAL;
1642 }
1643
1644 if (arg == MMC_ERASE_ARG) {
1645 rem = from % card->erase_size;
1646 if (rem) {
1647 rem = card->erase_size - rem;
1648 from += rem;
1649 if (nr > rem)
1650 nr -= rem;
1651 else
1652 return 0;
1653 }
1654 rem = nr % card->erase_size;
1655 if (rem)
1656 nr -= rem;
1657 }
1658
1659 if (nr == 0)
1660 return 0;
1661
1662 to = from + nr;
1663
1664 if (to <= from)
1665 return -EINVAL;
1666
1667 /* 'from' and 'to' are inclusive */
1668 to -= 1;
1669
1670 return mmc_do_erase(card, from, to, arg);
1671}
1672EXPORT_SYMBOL(mmc_erase);
1673
1674int mmc_can_erase(struct mmc_card *card)
1675{
1676 if ((card->host->caps & MMC_CAP_ERASE) &&
1677 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1678 return 1;
1679 return 0;
1680}
1681EXPORT_SYMBOL(mmc_can_erase);
1682
1683int mmc_can_trim(struct mmc_card *card)
1684{
1685 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1686 return 1;
1687 return 0;
1688}
1689EXPORT_SYMBOL(mmc_can_trim);
1690
Kyungmin Parkb3bf9152011-10-18 09:34:04 +09001691int mmc_can_discard(struct mmc_card *card)
1692{
1693 /*
1694 * As there's no way to detect the discard support bit at v4.5
1695 * use the s/w feature support filed.
1696 */
1697 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1698 return 1;
1699 return 0;
1700}
1701EXPORT_SYMBOL(mmc_can_discard);
1702
Kyungmin Parkd9ddd622011-10-14 14:15:48 +09001703int mmc_can_sanitize(struct mmc_card *card)
1704{
Adrian Hunter28302812012-04-05 14:45:48 +03001705 if (!mmc_can_trim(card) && !mmc_can_erase(card))
1706 return 0;
Kyungmin Parkd9ddd622011-10-14 14:15:48 +09001707 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1708 return 1;
1709 return 0;
1710}
1711EXPORT_SYMBOL(mmc_can_sanitize);
1712
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07001713int mmc_can_secure_erase_trim(struct mmc_card *card)
1714{
1715 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1716 return 1;
1717 return 0;
1718}
1719EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1720
1721int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1722 unsigned int nr)
1723{
1724 if (!card->erase_size)
1725 return 0;
1726 if (from % card->erase_size || nr % card->erase_size)
1727 return 0;
1728 return 1;
1729}
1730EXPORT_SYMBOL(mmc_erase_group_aligned);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
/*
 * Find the largest discard (in sectors) whose erase timeout still fits
 * within the host's max_discard_to budget, for the given erase argument.
 *
 * qty counts erase groups for MMC (or 1 << erase_shift units), but raw
 * sectors for SD.  Returns 0 if even a single unit exceeds the budget.
 */
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	/* Cap qty so later unit-to-sector conversion cannot overflow. */
	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		/*
		 * Grow by doubling increments; remember in y the largest
		 * increment that still kept the timeout within budget and
		 * non-decreasing, then add it to qty and retry.
		 */
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}
1777
/*
 * Compute the maximum number of sectors a single discard request may
 * cover given the host's discard timeout.  Returns UINT_MAX when the
 * host imposes no timeout.  Takes the smaller of the erase and trim
 * limits when trim is available.
 */
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		/* Less than one erase group fits: discard is useless. */
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
1807
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03001808int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1809{
Chris Ball1278dba2011-04-13 23:40:30 -04001810 struct mmc_command cmd = {0};
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03001811
1812 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1813 return 0;
1814
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +03001815 cmd.opcode = MMC_SET_BLOCKLEN;
1816 cmd.arg = blocklen;
1817 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1818 return mmc_wait_for_cmd(card->host, &cmd, 5);
1819}
1820EXPORT_SYMBOL(mmc_set_blocklen);
1821
Adrian Hunterb2499512011-08-29 16:42:11 +03001822static void mmc_hw_reset_for_init(struct mmc_host *host)
1823{
1824 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1825 return;
1826 mmc_host_clk_hold(host);
1827 host->ops->hw_reset(host);
1828 mmc_host_clk_release(host);
1829}
1830
1831int mmc_can_reset(struct mmc_card *card)
1832{
1833 u8 rst_n_function;
1834
1835 if (!mmc_card_mmc(card))
1836 return 0;
1837 rst_n_function = card->ext_csd.rst_n_function;
1838 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1839 return 0;
1840 return 1;
1841}
1842EXPORT_SYMBOL(mmc_can_reset);
1843
/*
 * Hardware-reset the card and re-initialize it via bus_ops->power_restore.
 *
 * @check: when non-zero, verify that the reset took effect by issuing
 *         SEND_STATUS; a card that still answers was NOT reset and the
 *         function bails out with -ENOSYS.
 *
 * Returns -EOPNOTSUPP when host or card cannot do a hardware reset,
 * -EINVAL when no card is present, otherwise the power_restore result.
 */
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	/* Keep the host clock running across the reset sequence. */
	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	/* Drop speed-mode state and restore power-on bus settings. */
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}
1897
/* Hardware-reset the card without verifying the reset took effect. */
int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);
1903
/* Hardware-reset the card and verify (via SEND_STATUS) that it happened. */
int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
1909
/*
 * Try to initialize whatever card is on the bus at the given clock
 * frequency.  Returns 0 when one of the SDIO/SD/MMC attach routines
 * succeeds; powers the host back off and returns -EIO otherwise.
 */
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
1950
/*
 * Actively probe whether the card has been removed, using the bus
 * ops' alive() check.  Marks the card removed on failure.  Returns
 * non-zero when the card is gone, 0 when it is (assumed) present.
 */
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	/* Non-removable cards, or buses without alive(), never "vanish". */
	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}
1969
/*
 * Report whether the card has been removed.  Only performs an active
 * probe when a change was flagged, or the host polls, or it asked to
 * detect on errors (MMC_CAP2_DETECT_ON_ERR); otherwise returns the
 * cached removed state.  Host must be claimed by the caller.
 */
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
2005
/*
 * Delayed-work handler that detects card insertion/removal.  First lets
 * the current bus handler check for removal, then — only if no card
 * remains attached — retries initialization at progressively lower
 * clock frequencies.  Reschedules itself when the host needs polling.
 */
void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Card-detect pin says no card: nothing to probe. */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
2063
/* Bring the host to a known powered-off state and trigger a card scan. */
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
2069
/*
 * Shut the host down: stop detect work, detach any attached bus/card
 * and power off.  Note bus_ops->remove() is called before claiming the
 * host — calling it with the host claimed can deadlock.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	/* With no bus handler there must be no card left behind. */
	BUG_ON(host->card);

	mmc_power_off(host);
}
2104
/*
 * Power down the card while remembering enough state to restore it
 * later.  Requires a live bus with a power_restore handler, otherwise
 * returns -EINVAL.  The host is always powered off on the success path.
 */
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	/* power_restore is mandatory; without it we could never come back. */
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
2130
/*
 * Power the card back up and let the bus handler re-initialize it.
 * Counterpart of mmc_power_save_host(); returns -EINVAL when the bus
 * is gone or lacks a power_restore handler.
 */
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
2154
Jarkko Lavinenb1ebe382009-09-22 16:44:34 -07002155int mmc_card_awake(struct mmc_host *host)
2156{
2157 int err = -ENOSYS;
2158
Ulf Hanssonaa9df4f2011-12-19 16:24:19 +01002159 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2160 return 0;
2161
Jarkko Lavinenb1ebe382009-09-22 16:44:34 -07002162 mmc_bus_get(host);
2163
2164 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2165 err = host->bus_ops->awake(host);
2166
2167 mmc_bus_put(host);
2168
2169 return err;
2170}
2171EXPORT_SYMBOL(mmc_card_awake);
2172
2173int mmc_card_sleep(struct mmc_host *host)
2174{
2175 int err = -ENOSYS;
2176
Ulf Hanssonaa9df4f2011-12-19 16:24:19 +01002177 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2178 return 0;
2179
Jarkko Lavinenb1ebe382009-09-22 16:44:34 -07002180 mmc_bus_get(host);
2181
Kyungmin Parkc99872a2011-11-17 13:34:33 +09002182 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
Jarkko Lavinenb1ebe382009-09-22 16:44:34 -07002183 err = host->bus_ops->sleep(host);
2184
2185 mmc_bus_put(host);
2186
2187 return err;
2188}
2189EXPORT_SYMBOL(mmc_card_sleep);
2190
2191int mmc_card_can_sleep(struct mmc_host *host)
2192{
2193 struct mmc_card *card = host->card;
2194
2195 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2196 return 1;
2197 return 0;
2198}
2199EXPORT_SYMBOL(mmc_card_can_sleep);
2200
/*
 * Flush the cache to the non-volatile storage.
 *
 * No-op (returns 0) unless the host advertises MMC_CAP2_CACHE_CTRL and
 * the card is an MMC with a cache that is currently enabled.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
2225
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 *
 * Only applies to hosts with MMC_CAP2_CACHE_CTRL and a non-removable
 * card; otherwise silently returns 0.  Claims the host for the
 * duration of the EXT_CSD switch.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		/* Only switch when the cached state actually differs. */
		if (card->ext_csd.cache_ctrl ^ enable) {
			/*
			 * Timeout is applied only when enabling; disabling
			 * uses 0 — NOTE(review): presumably the flush time
			 * is bounded elsewhere, confirm against the spec.
			 */
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
						mmc_hostname(card->host),
						enable ? "on" : "off",
						err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
2264
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265#ifdef CONFIG_PM
2266
/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 *
 *	Flushes pending detect work and the card cache, then asks the bus
 *	handler to suspend.  If the handler cannot suspend (-ENOSYS) or
 *	cannot resume, the card is simply removed and will be redetected
 *	on resume.  Power is cut unless the card must keep power.
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	/* Flush the card cache before suspending; abort on failure. */
	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {

		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.  (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
2315
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 *
 *	Powers the card back up (unless it kept power across suspend) and
 *	calls the bus handler's resume.  A resume error is logged but
 *	deliberately swallowed (returns 0) — the card was likely removed.
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	/* KEEP_POWER only applies to a single suspend/resume cycle. */
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
2357
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in a PM notifier while userspace isn't yet frozen, so we will
 * be able to sync the card.
 *
 * On suspend/hibernate prepare: disable rescans and, if the bus handler
 * cannot suspend, detach and power off the card.  On the post-resume
 * events: re-enable rescans and schedule a detect.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;


	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		/* Buses that can suspend keep their card; nothing to do. */
		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408#endif
2409
/*
 * Module init: create the mmc workqueue and register the mmc bus, the
 * host class and the sdio bus.  Uses goto-based cleanup so each failure
 * unwinds exactly the registrations that succeeded before it.
 */
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
2441
/* Module exit: tear everything down in the reverse order of mmc_init(). */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}
2449
Nicolas Pitre26074962007-06-16 02:07:53 -04002450subsys_initcall(mmc_init);
Pierre Ossmanffce2e72007-05-19 14:32:22 +02002451module_exit(mmc_exit);
2452
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453MODULE_LICENSE("GPL");