blob: ed02ebd899d1f991caec3a0a8b3b426d7b460b6c [file] [log] [blame]
David Brownell15a05802007-08-08 09:12:54 -07001/*
2 * mmc_spi.c - Access SD/MMC cards through SPI master controllers
3 *
4 * (C) Copyright 2005, Intec Automation,
5 * Mike Lavender (mike@steroidmicros)
6 * (C) Copyright 2006-2007, David Brownell
7 * (C) Copyright 2007, Axis Communications,
8 * Hans-Peter Nilsson (hp@axis.com)
9 * (C) Copyright 2007, ATRON electronic GmbH,
10 * Jan Nikitenko <jan.nikitenko@gmail.com>
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
Wolfgang Muees56e303e2009-04-07 15:26:30 +010027#include <linux/sched.h>
David Brownell15a05802007-08-08 09:12:54 -070028#include <linux/delay.h>
David Brownell23fd5042007-10-14 14:50:25 -070029#include <linux/bio.h>
David Brownell15a05802007-08-08 09:12:54 -070030#include <linux/dma-mapping.h>
31#include <linux/crc7.h>
32#include <linux/crc-itu-t.h>
Al Viroe5712a62007-10-17 01:09:07 +010033#include <linux/scatterlist.h>
David Brownell15a05802007-08-08 09:12:54 -070034
35#include <linux/mmc/host.h>
36#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
37
38#include <linux/spi/spi.h>
39#include <linux/spi/mmc_spi.h>
40
41#include <asm/unaligned.h>
42
43
44/* NOTES:
45 *
46 * - For now, we won't try to interoperate with a real mmc/sd/sdio
47 * controller, although some of them do have hardware support for
48 * SPI protocol. The main reason for such configs would be mmc-ish
49 * cards like DataFlash, which don't support that "native" protocol.
50 *
51 * We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
52 * switch between driver stacks, and in any case if "native" mode
53 * is available, it will be faster and hence preferable.
54 *
55 * - MMC depends on a different chipselect management policy than the
56 * SPI interface currently supports for shared bus segments: it needs
57 * to issue multiple spi_message requests with the chipselect active,
58 * using the results of one message to decide the next one to issue.
59 *
60 * Pending updates to the programming interface, this driver expects
61 * that it not share the bus with other drivers (precluding conflicts).
62 *
63 * - We tell the controller to keep the chipselect active from the
64 * beginning of an mmc_host_ops.request until the end. So beware
65 * of SPI controller drivers that mis-handle the cs_change flag!
66 *
67 * However, many cards seem OK with chipselect flapping up/down
68 * during that time ... at least on unshared bus segments.
69 */
70
71
72/*
73 * Local protocol constants, internal to data block protocols.
74 */
75
76/* Response tokens used to ack each block written: */
77#define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
78#define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
79#define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
80#define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
81
82/* Read and write blocks start with these tokens and end with crc;
83 * on error, read tokens act like a subset of R2_SPI_* values.
84 */
85#define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */
86#define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */
87#define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */
88
89#define MMC_SPI_BLOCKSIZE 512
90
91
92/* These fixed timeouts come from the latest SD specs, which say to ignore
93 * the CSD values. The R1B value is for card erase (e.g. the "I forgot the
94 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
95 * reads which takes nowhere near that long. Older cards may be able to use
96 * shorter timeouts ... but why bother?
97 */
Wolfgang Muees56e303e2009-04-07 15:26:30 +010098#define r1b_timeout (HZ * 3)
David Brownell15a05802007-08-08 09:12:54 -070099
100
101/****************************************************************************/
102
103/*
104 * Local Data Structures
105 */
106
107/* "scratch" is per-{command,block} data exchanged with the card */
108struct scratch {
109 u8 status[29];
110 u8 data_token;
111 __be16 crc_val;
112};
113
114struct mmc_spi_host {
115 struct mmc_host *mmc;
116 struct spi_device *spi;
117
118 unsigned char power_mode;
119 u16 powerup_msecs;
120
121 struct mmc_spi_platform_data *pdata;
122
123 /* for bulk data transfers */
124 struct spi_transfer token, t, crc, early_status;
125 struct spi_message m;
126
127 /* for status readback */
128 struct spi_transfer status;
129 struct spi_message readback;
130
131 /* underlying DMA-aware controller, or null */
132 struct device *dma_dev;
133
134 /* buffer used for commands and for message "overhead" */
135 struct scratch *data;
136 dma_addr_t data_dma;
137
138 /* Specs say to write ones most of the time, even when the card
139 * has no need to read its input data; and many cards won't care.
140 * This is our source of those ones.
141 */
142 void *ones;
143 dma_addr_t ones_dma;
144};
145
146
147/****************************************************************************/
148
149/*
150 * MMC-over-SPI protocol glue, used by the MMC stack interface
151 */
152
153static inline int mmc_cs_off(struct mmc_spi_host *host)
154{
155 /* chipselect will always be inactive after setup() */
156 return spi_setup(host->spi);
157}
158
159static int
160mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
161{
162 int status;
163
164 if (len > sizeof(*host->data)) {
165 WARN_ON(1);
166 return -EIO;
167 }
168
169 host->status.len = len;
170
171 if (host->dma_dev)
172 dma_sync_single_for_device(host->dma_dev,
173 host->data_dma, sizeof(*host->data),
174 DMA_FROM_DEVICE);
175
176 status = spi_sync(host->spi, &host->readback);
David Brownell15a05802007-08-08 09:12:54 -0700177
178 if (host->dma_dev)
179 dma_sync_single_for_cpu(host->dma_dev,
180 host->data_dma, sizeof(*host->data),
181 DMA_FROM_DEVICE);
182
183 return status;
184}
185
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100186static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
187 unsigned n, u8 byte)
David Brownell15a05802007-08-08 09:12:54 -0700188{
189 u8 *cp = host->data->status;
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100190 unsigned long start = jiffies;
David Brownell15a05802007-08-08 09:12:54 -0700191
192 while (1) {
193 int status;
194 unsigned i;
195
196 status = mmc_spi_readbytes(host, n);
197 if (status < 0)
198 return status;
199
200 for (i = 0; i < n; i++) {
201 if (cp[i] != byte)
202 return cp[i];
203 }
204
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100205 if (time_is_before_jiffies(start + timeout))
David Brownell15a05802007-08-08 09:12:54 -0700206 break;
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100207
208 /* If we need long timeouts, we may release the CPU.
209 * We use jiffies here because we want to have a relation
210 * between elapsed time and the blocking of the scheduler.
211 */
212 if (time_is_before_jiffies(start+1))
213 schedule();
David Brownell15a05802007-08-08 09:12:54 -0700214 }
215 return -ETIMEDOUT;
216}
217
218static inline int
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100219mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
David Brownell15a05802007-08-08 09:12:54 -0700220{
221 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
222}
223
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100224static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
David Brownell15a05802007-08-08 09:12:54 -0700225{
Matthew Fleming162350e2008-10-02 12:21:42 +0100226 return mmc_spi_skip(host, timeout, 1, 0xff);
David Brownell15a05802007-08-08 09:12:54 -0700227}
228
229
230/*
231 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
232 * hosts return! The low byte holds R1_SPI bits. The next byte may hold
233 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
234 *
235 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
236 * newer cards R7 (IF_COND).
237 */
238
239static char *maptype(struct mmc_command *cmd)
240{
241 switch (mmc_spi_resp_type(cmd)) {
242 case MMC_RSP_SPI_R1: return "R1";
243 case MMC_RSP_SPI_R1B: return "R1B";
244 case MMC_RSP_SPI_R2: return "R2/R5";
245 case MMC_RSP_SPI_R3: return "R3/R4/R7";
246 default: return "?";
247 }
248}
249
250/* return zero, else negative errno after setting cmd->error */
251static int mmc_spi_response_get(struct mmc_spi_host *host,
252 struct mmc_command *cmd, int cs_on)
253{
254 u8 *cp = host->data->status;
255 u8 *end = cp + host->t.len;
256 int value = 0;
257 char tag[32];
258
259 snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
260 cmd->opcode, maptype(cmd));
261
262 /* Except for data block reads, the whole response will already
263 * be stored in the scratch buffer. It's somewhere after the
264 * command and the first byte we read after it. We ignore that
265 * first byte. After STOP_TRANSMISSION command it may include
266 * two data bits, but otherwise it's all ones.
267 */
268 cp += 8;
269 while (cp < end && *cp == 0xff)
270 cp++;
271
272 /* Data block reads (R1 response types) may need more data... */
273 if (cp == end) {
274 unsigned i;
275
276 cp = host->data->status;
277
278 /* Card sends N(CR) (== 1..8) bytes of all-ones then one
279 * status byte ... and we already scanned 2 bytes.
280 *
281 * REVISIT block read paths use nasty byte-at-a-time I/O
282 * so it can always DMA directly into the target buffer.
283 * It'd probably be better to memcpy() the first chunk and
284 * avoid extra i/o calls...
Wolfgang Mueesea15ba52009-03-11 14:17:43 +0100285 *
286 * Note we check for more than 8 bytes, because in practice,
287 * some SD cards are slow...
David Brownell15a05802007-08-08 09:12:54 -0700288 */
Wolfgang Mueesea15ba52009-03-11 14:17:43 +0100289 for (i = 2; i < 16; i++) {
David Brownell15a05802007-08-08 09:12:54 -0700290 value = mmc_spi_readbytes(host, 1);
291 if (value < 0)
292 goto done;
293 if (*cp != 0xff)
294 goto checkstatus;
295 }
296 value = -ETIMEDOUT;
297 goto done;
298 }
299
300checkstatus:
301 if (*cp & 0x80) {
302 dev_dbg(&host->spi->dev, "%s: INVALID RESPONSE, %02x\n",
303 tag, *cp);
304 value = -EBADR;
305 goto done;
306 }
307
308 cmd->resp[0] = *cp++;
309 cmd->error = 0;
310
311 /* Status byte: the entire seven-bit R1 response. */
312 if (cmd->resp[0] != 0) {
313 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS
314 | R1_SPI_ILLEGAL_COMMAND)
315 & cmd->resp[0])
316 value = -EINVAL;
317 else if (R1_SPI_COM_CRC & cmd->resp[0])
318 value = -EILSEQ;
319 else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
320 & cmd->resp[0])
321 value = -EIO;
322 /* else R1_SPI_IDLE, "it's resetting" */
323 }
324
325 switch (mmc_spi_resp_type(cmd)) {
326
327 /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
328 * and less-common stuff like various erase operations.
329 */
330 case MMC_RSP_SPI_R1B:
331 /* maybe we read all the busy tokens already */
332 while (cp < end && *cp == 0)
333 cp++;
334 if (cp == end)
335 mmc_spi_wait_unbusy(host, r1b_timeout);
336 break;
337
338 /* SPI R2 == R1 + second status byte; SEND_STATUS
339 * SPI R5 == R1 + data byte; IO_RW_DIRECT
340 */
341 case MMC_RSP_SPI_R2:
342 cmd->resp[0] |= *cp << 8;
343 break;
344
345 /* SPI R3, R4, or R7 == R1 + 4 bytes */
346 case MMC_RSP_SPI_R3:
Harvey Harrison48b2cf92008-04-29 01:03:34 -0700347 cmd->resp[1] = get_unaligned_be32(cp);
David Brownell15a05802007-08-08 09:12:54 -0700348 break;
349
350 /* SPI R1 == just one status byte */
351 case MMC_RSP_SPI_R1:
352 break;
353
354 default:
355 dev_dbg(&host->spi->dev, "bad response type %04x\n",
356 mmc_spi_resp_type(cmd));
357 if (value >= 0)
358 value = -EINVAL;
359 goto done;
360 }
361
362 if (value < 0)
363 dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
364 tag, cmd->resp[0], cmd->resp[1]);
365
366 /* disable chipselect on errors and some success cases */
367 if (value >= 0 && cs_on)
368 return value;
369done:
370 if (value < 0)
371 cmd->error = value;
372 mmc_cs_off(host);
373 return value;
374}
375
376/* Issue command and read its response.
377 * Returns zero on success, negative for error.
378 *
379 * On error, caller must cope with mmc core retry mechanism. That
380 * means immediate low-level resubmit, which affects the bus lock...
381 */
382static int
383mmc_spi_command_send(struct mmc_spi_host *host,
384 struct mmc_request *mrq,
385 struct mmc_command *cmd, int cs_on)
386{
387 struct scratch *data = host->data;
388 u8 *cp = data->status;
389 u32 arg = cmd->arg;
390 int status;
391 struct spi_transfer *t;
392
393 /* We can handle most commands (except block reads) in one full
394 * duplex I/O operation before either starting the next transfer
395 * (data block or command) or else deselecting the card.
396 *
397 * First, write 7 bytes:
398 * - an all-ones byte to ensure the card is ready
399 * - opcode byte (plus start and transmission bits)
400 * - four bytes of big-endian argument
401 * - crc7 (plus end bit) ... always computed, it's cheap
402 *
403 * We init the whole buffer to all-ones, which is what we need
404 * to write while we're reading (later) response data.
405 */
406 memset(cp++, 0xff, sizeof(data->status));
407
408 *cp++ = 0x40 | cmd->opcode;
409 *cp++ = (u8)(arg >> 24);
410 *cp++ = (u8)(arg >> 16);
411 *cp++ = (u8)(arg >> 8);
412 *cp++ = (u8)arg;
413 *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
414
415 /* Then, read up to 13 bytes (while writing all-ones):
416 * - N(CR) (== 1..8) bytes of all-ones
417 * - status byte (for all response types)
418 * - the rest of the response, either:
419 * + nothing, for R1 or R1B responses
420 * + second status byte, for R2 responses
421 * + four data bytes, for R3 and R7 responses
422 *
423 * Finally, read some more bytes ... in the nice cases we know in
424 * advance how many, and reading 1 more is always OK:
425 * - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
426 * - N(RC) (== 1..N) bytes of all-ones, before next command
427 * - N(WR) (== 1..N) bytes of all-ones, before data write
428 *
429 * So in those cases one full duplex I/O of at most 21 bytes will
430 * handle the whole command, leaving the card ready to receive a
431 * data block or new command. We do that whenever we can, shaving
432 * CPU and IRQ costs (especially when using DMA or FIFOs).
433 *
434 * There are two other cases, where it's not generally practical
435 * to rely on a single I/O:
436 *
437 * - R1B responses need at least N(EC) bytes of all-zeroes.
438 *
439 * In this case we can *try* to fit it into one I/O, then
440 * maybe read more data later.
441 *
442 * - Data block reads are more troublesome, since a variable
443 * number of padding bytes precede the token and data.
444 * + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
445 * + N(AC) (== 1..many) bytes of all-ones
446 *
447 * In this case we currently only have minimal speedups here:
448 * when N(CR) == 1 we can avoid I/O in response_get().
449 */
450 if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
451 cp += 2; /* min(N(CR)) + status */
452 /* R1 */
453 } else {
454 cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */
455 if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */
456 cp++;
457 else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */
458 cp += 4;
459 else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
460 cp = data->status + sizeof(data->status);
461 /* else: R1 (most commands) */
462 }
463
464 dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
465 cmd->opcode, maptype(cmd));
466
467 /* send command, leaving chipselect active */
468 spi_message_init(&host->m);
469
470 t = &host->t;
471 memset(t, 0, sizeof(*t));
472 t->tx_buf = t->rx_buf = data->status;
473 t->tx_dma = t->rx_dma = host->data_dma;
474 t->len = cp - data->status;
475 t->cs_change = 1;
476 spi_message_add_tail(t, &host->m);
477
478 if (host->dma_dev) {
479 host->m.is_dma_mapped = 1;
480 dma_sync_single_for_device(host->dma_dev,
481 host->data_dma, sizeof(*host->data),
482 DMA_BIDIRECTIONAL);
483 }
484 status = spi_sync(host->spi, &host->m);
David Brownell15a05802007-08-08 09:12:54 -0700485
486 if (host->dma_dev)
487 dma_sync_single_for_cpu(host->dma_dev,
488 host->data_dma, sizeof(*host->data),
489 DMA_BIDIRECTIONAL);
490 if (status < 0) {
491 dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
492 cmd->error = status;
493 return status;
494 }
495
496 /* after no-data commands and STOP_TRANSMISSION, chipselect off */
497 return mmc_spi_response_get(host, cmd, cs_on);
498}
499
500/* Build data message with up to four separate transfers. For TX, we
501 * start by writing the data token. And in most cases, we finish with
502 * a status transfer.
503 *
504 * We always provide TX data for data and CRC. The MMC/SD protocol
505 * requires us to write ones; but Linux defaults to writing zeroes;
506 * so we explicitly initialize it to all ones on RX paths.
507 *
508 * We also handle DMA mapping, so the underlying SPI controller does
509 * not need to (re)do it for each message.
510 */
511static void
512mmc_spi_setup_data_message(
513 struct mmc_spi_host *host,
514 int multiple,
515 enum dma_data_direction direction)
516{
517 struct spi_transfer *t;
518 struct scratch *scratch = host->data;
519 dma_addr_t dma = host->data_dma;
520
521 spi_message_init(&host->m);
522 if (dma)
523 host->m.is_dma_mapped = 1;
524
525 /* for reads, readblock() skips 0xff bytes before finding
526 * the token; for writes, this transfer issues that token.
527 */
528 if (direction == DMA_TO_DEVICE) {
529 t = &host->token;
530 memset(t, 0, sizeof(*t));
531 t->len = 1;
532 if (multiple)
533 scratch->data_token = SPI_TOKEN_MULTI_WRITE;
534 else
535 scratch->data_token = SPI_TOKEN_SINGLE;
536 t->tx_buf = &scratch->data_token;
537 if (dma)
538 t->tx_dma = dma + offsetof(struct scratch, data_token);
539 spi_message_add_tail(t, &host->m);
540 }
541
542 /* Body of transfer is buffer, then CRC ...
543 * either TX-only, or RX with TX-ones.
544 */
545 t = &host->t;
546 memset(t, 0, sizeof(*t));
547 t->tx_buf = host->ones;
548 t->tx_dma = host->ones_dma;
549 /* length and actual buffer info are written later */
550 spi_message_add_tail(t, &host->m);
551
552 t = &host->crc;
553 memset(t, 0, sizeof(*t));
554 t->len = 2;
555 if (direction == DMA_TO_DEVICE) {
556 /* the actual CRC may get written later */
557 t->tx_buf = &scratch->crc_val;
558 if (dma)
559 t->tx_dma = dma + offsetof(struct scratch, crc_val);
560 } else {
561 t->tx_buf = host->ones;
562 t->tx_dma = host->ones_dma;
563 t->rx_buf = &scratch->crc_val;
564 if (dma)
565 t->rx_dma = dma + offsetof(struct scratch, crc_val);
566 }
567 spi_message_add_tail(t, &host->m);
568
569 /*
570 * A single block read is followed by N(EC) [0+] all-ones bytes
571 * before deselect ... don't bother.
572 *
573 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
574 * the next block is read, or a STOP_TRANSMISSION is issued. We'll
575 * collect that single byte, so readblock() doesn't need to.
576 *
577 * For a write, the one-byte data response follows immediately, then
578 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
579 * Then single block reads may deselect, and multiblock ones issue
580 * the next token (next data block, or STOP_TRAN). We can try to
581 * minimize I/O ops by using a single read to collect end-of-busy.
582 */
583 if (multiple || direction == DMA_TO_DEVICE) {
584 t = &host->early_status;
585 memset(t, 0, sizeof(*t));
586 t->len = (direction == DMA_TO_DEVICE)
587 ? sizeof(scratch->status)
588 : 1;
589 t->tx_buf = host->ones;
590 t->tx_dma = host->ones_dma;
591 t->rx_buf = scratch->status;
592 if (dma)
593 t->rx_dma = dma + offsetof(struct scratch, status);
594 t->cs_change = 1;
595 spi_message_add_tail(t, &host->m);
596 }
597}
598
599/*
600 * Write one block:
601 * - caller handled preceding N(WR) [1+] all-ones bytes
602 * - data block
603 * + token
604 * + data bytes
605 * + crc16
606 * - an all-ones byte ... card writes a data-response byte
607 * - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
608 *
609 * Return negative errno, else success.
610 */
611static int
Matthew Fleming162350e2008-10-02 12:21:42 +0100612mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100613 unsigned long timeout)
David Brownell15a05802007-08-08 09:12:54 -0700614{
615 struct spi_device *spi = host->spi;
616 int status, i;
617 struct scratch *scratch = host->data;
Wolfgang Mueesf079a8f2009-03-16 12:23:03 +0100618 u32 pattern;
David Brownell15a05802007-08-08 09:12:54 -0700619
620 if (host->mmc->use_spi_crc)
621 scratch->crc_val = cpu_to_be16(
622 crc_itu_t(0, t->tx_buf, t->len));
623 if (host->dma_dev)
624 dma_sync_single_for_device(host->dma_dev,
625 host->data_dma, sizeof(*scratch),
626 DMA_BIDIRECTIONAL);
627
628 status = spi_sync(spi, &host->m);
David Brownell15a05802007-08-08 09:12:54 -0700629
630 if (status != 0) {
631 dev_dbg(&spi->dev, "write error (%d)\n", status);
632 return status;
633 }
634
635 if (host->dma_dev)
636 dma_sync_single_for_cpu(host->dma_dev,
637 host->data_dma, sizeof(*scratch),
638 DMA_BIDIRECTIONAL);
639
640 /*
641 * Get the transmission data-response reply. It must follow
642 * immediately after the data block we transferred. This reply
643 * doesn't necessarily tell whether the write operation succeeded;
644 * it just says if the transmission was ok and whether *earlier*
645 * writes succeeded; see the standard.
Wolfgang Mueesf079a8f2009-03-16 12:23:03 +0100646 *
647 * In practice, there are (even modern SDHC-)cards which are late
648 * in sending the response, and miss the time frame by a few bits,
649 * so we have to cope with this situation and check the response
650 * bit-by-bit. Arggh!!!
David Brownell15a05802007-08-08 09:12:54 -0700651 */
Wolfgang Mueesf079a8f2009-03-16 12:23:03 +0100652 pattern = scratch->status[0] << 24;
653 pattern |= scratch->status[1] << 16;
654 pattern |= scratch->status[2] << 8;
655 pattern |= scratch->status[3];
656
657 /* First 3 bit of pattern are undefined */
658 pattern |= 0xE0000000;
659
660 /* left-adjust to leading 0 bit */
661 while (pattern & 0x80000000)
662 pattern <<= 1;
663 /* right-adjust for pattern matching. Code is in bit 4..0 now. */
664 pattern >>= 27;
665
666 switch (pattern) {
David Brownell15a05802007-08-08 09:12:54 -0700667 case SPI_RESPONSE_ACCEPTED:
668 status = 0;
669 break;
670 case SPI_RESPONSE_CRC_ERR:
671 /* host shall then issue MMC_STOP_TRANSMISSION */
672 status = -EILSEQ;
673 break;
674 case SPI_RESPONSE_WRITE_ERR:
675 /* host shall then issue MMC_STOP_TRANSMISSION,
676 * and should MMC_SEND_STATUS to sort it out
677 */
678 status = -EIO;
679 break;
680 default:
681 status = -EPROTO;
682 break;
683 }
684 if (status != 0) {
685 dev_dbg(&spi->dev, "write error %02x (%d)\n",
686 scratch->status[0], status);
687 return status;
688 }
689
690 t->tx_buf += t->len;
691 if (host->dma_dev)
692 t->tx_dma += t->len;
693
694 /* Return when not busy. If we didn't collect that status yet,
695 * we'll need some more I/O.
696 */
Wolfgang Mueesf079a8f2009-03-16 12:23:03 +0100697 for (i = 4; i < sizeof(scratch->status); i++) {
698 /* card is non-busy if the most recent bit is 1 */
699 if (scratch->status[i] & 0x01)
David Brownell15a05802007-08-08 09:12:54 -0700700 return 0;
701 }
Matthew Fleming162350e2008-10-02 12:21:42 +0100702 return mmc_spi_wait_unbusy(host, timeout);
David Brownell15a05802007-08-08 09:12:54 -0700703}
704
705/*
706 * Read one block:
707 * - skip leading all-ones bytes ... either
708 * + N(AC) [1..f(clock,CSD)] usually, else
709 * + N(CX) [0..8] when reading CSD or CID
710 * - data block
711 * + token ... if error token, no data or crc
712 * + data bytes
713 * + crc16
714 *
715 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
716 * before dropping chipselect.
717 *
718 * For multiblock reads, caller either reads the next block or issues a
719 * STOP_TRANSMISSION command.
720 */
721static int
Matthew Fleming162350e2008-10-02 12:21:42 +0100722mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100723 unsigned long timeout)
David Brownell15a05802007-08-08 09:12:54 -0700724{
725 struct spi_device *spi = host->spi;
726 int status;
727 struct scratch *scratch = host->data;
728
729 /* At least one SD card sends an all-zeroes byte when N(CX)
730 * applies, before the all-ones bytes ... just cope with that.
731 */
732 status = mmc_spi_readbytes(host, 1);
733 if (status < 0)
734 return status;
735 status = scratch->status[0];
736 if (status == 0xff || status == 0)
Matthew Fleming162350e2008-10-02 12:21:42 +0100737 status = mmc_spi_readtoken(host, timeout);
David Brownell15a05802007-08-08 09:12:54 -0700738
739 if (status == SPI_TOKEN_SINGLE) {
740 if (host->dma_dev) {
741 dma_sync_single_for_device(host->dma_dev,
742 host->data_dma, sizeof(*scratch),
743 DMA_BIDIRECTIONAL);
744 dma_sync_single_for_device(host->dma_dev,
745 t->rx_dma, t->len,
746 DMA_FROM_DEVICE);
747 }
748
749 status = spi_sync(spi, &host->m);
David Brownell15a05802007-08-08 09:12:54 -0700750
751 if (host->dma_dev) {
752 dma_sync_single_for_cpu(host->dma_dev,
753 host->data_dma, sizeof(*scratch),
754 DMA_BIDIRECTIONAL);
755 dma_sync_single_for_cpu(host->dma_dev,
756 t->rx_dma, t->len,
757 DMA_FROM_DEVICE);
758 }
759
760 } else {
761 dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
762
763 /* we've read extra garbage, timed out, etc */
764 if (status < 0)
765 return status;
766
767 /* low four bits are an R2 subset, fifth seems to be
768 * vendor specific ... map them all to generic error..
769 */
770 return -EIO;
771 }
772
773 if (host->mmc->use_spi_crc) {
774 u16 crc = crc_itu_t(0, t->rx_buf, t->len);
775
776 be16_to_cpus(&scratch->crc_val);
777 if (scratch->crc_val != crc) {
778 dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
779 "computed=0x%04x len=%d\n",
780 scratch->crc_val, crc, t->len);
781 return -EILSEQ;
782 }
783 }
784
785 t->rx_buf += t->len;
786 if (host->dma_dev)
787 t->rx_dma += t->len;
788
789 return 0;
790}
791
792/*
793 * An MMC/SD data stage includes one or more blocks, optional CRCs,
794 * and inline handshaking. That handhaking makes it unlike most
795 * other SPI protocol stacks.
796 */
797static void
798mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
799 struct mmc_data *data, u32 blk_size)
800{
801 struct spi_device *spi = host->spi;
802 struct device *dma_dev = host->dma_dev;
803 struct spi_transfer *t;
804 enum dma_data_direction direction;
805 struct scatterlist *sg;
806 unsigned n_sg;
807 int multiple = (data->blocks > 1);
Matthew Fleming162350e2008-10-02 12:21:42 +0100808 u32 clock_rate;
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100809 unsigned long timeout;
David Brownell15a05802007-08-08 09:12:54 -0700810
811 if (data->flags & MMC_DATA_READ)
812 direction = DMA_FROM_DEVICE;
813 else
814 direction = DMA_TO_DEVICE;
815 mmc_spi_setup_data_message(host, multiple, direction);
816 t = &host->t;
817
Matthew Fleming162350e2008-10-02 12:21:42 +0100818 if (t->speed_hz)
819 clock_rate = t->speed_hz;
820 else
821 clock_rate = spi->max_speed_hz;
822
Wolfgang Muees56e303e2009-04-07 15:26:30 +0100823 timeout = data->timeout_ns +
824 data->timeout_clks * 1000000 / clock_rate;
825 timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
Matthew Fleming162350e2008-10-02 12:21:42 +0100826
David Brownell15a05802007-08-08 09:12:54 -0700827 /* Handle scatterlist segments one at a time, with synch for
828 * each 512-byte block
829 */
830 for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
831 int status = 0;
832 dma_addr_t dma_addr = 0;
833 void *kmap_addr;
834 unsigned length = sg->length;
835 enum dma_data_direction dir = direction;
836
837 /* set up dma mapping for controller drivers that might
838 * use DMA ... though they may fall back to PIO
839 */
840 if (dma_dev) {
841 /* never invalidate whole *shared* pages ... */
842 if ((sg->offset != 0 || length != PAGE_SIZE)
843 && dir == DMA_FROM_DEVICE)
844 dir = DMA_BIDIRECTIONAL;
845
Jens Axboe45711f12007-10-22 21:19:53 +0200846 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
David Brownell15a05802007-08-08 09:12:54 -0700847 PAGE_SIZE, dir);
848 if (direction == DMA_TO_DEVICE)
849 t->tx_dma = dma_addr + sg->offset;
850 else
851 t->rx_dma = dma_addr + sg->offset;
852 }
853
854 /* allow pio too; we don't allow highmem */
Jens Axboe45711f12007-10-22 21:19:53 +0200855 kmap_addr = kmap(sg_page(sg));
David Brownell15a05802007-08-08 09:12:54 -0700856 if (direction == DMA_TO_DEVICE)
857 t->tx_buf = kmap_addr + sg->offset;
858 else
859 t->rx_buf = kmap_addr + sg->offset;
860
861 /* transfer each block, and update request status */
862 while (length) {
863 t->len = min(length, blk_size);
864
865 dev_dbg(&host->spi->dev,
866 " mmc_spi: %s block, %d bytes\n",
867 (direction == DMA_TO_DEVICE)
868 ? "write"
869 : "read",
870 t->len);
871
872 if (direction == DMA_TO_DEVICE)
Matthew Fleming162350e2008-10-02 12:21:42 +0100873 status = mmc_spi_writeblock(host, t, timeout);
David Brownell15a05802007-08-08 09:12:54 -0700874 else
Matthew Fleming162350e2008-10-02 12:21:42 +0100875 status = mmc_spi_readblock(host, t, timeout);
David Brownell15a05802007-08-08 09:12:54 -0700876 if (status < 0)
877 break;
878
879 data->bytes_xfered += t->len;
880 length -= t->len;
881
882 if (!multiple)
883 break;
884 }
885
886 /* discard mappings */
887 if (direction == DMA_FROM_DEVICE)
Jens Axboe45711f12007-10-22 21:19:53 +0200888 flush_kernel_dcache_page(sg_page(sg));
889 kunmap(sg_page(sg));
David Brownell15a05802007-08-08 09:12:54 -0700890 if (dma_dev)
891 dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
892
893 if (status < 0) {
894 data->error = status;
895 dev_dbg(&spi->dev, "%s status %d\n",
896 (direction == DMA_TO_DEVICE)
897 ? "write" : "read",
898 status);
899 break;
900 }
901 }
902
903 /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
904 * can be issued before multiblock writes. Unlike its more widely
905 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
906 * that can affect the STOP_TRAN logic. Complete (and current)
907 * MMC specs should sort that out before Linux starts using CMD23.
908 */
909 if (direction == DMA_TO_DEVICE && multiple) {
910 struct scratch *scratch = host->data;
911 int tmp;
912 const unsigned statlen = sizeof(scratch->status);
913
914 dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
915
916 /* Tweak the per-block message we set up earlier by morphing
917 * it to hold single buffer with the token followed by some
918 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
919 * "not busy any longer" status, and leave chip selected.
920 */
921 INIT_LIST_HEAD(&host->m.transfers);
922 list_add(&host->early_status.transfer_list,
923 &host->m.transfers);
924
925 memset(scratch->status, 0xff, statlen);
926 scratch->status[0] = SPI_TOKEN_STOP_TRAN;
927
928 host->early_status.tx_buf = host->early_status.rx_buf;
929 host->early_status.tx_dma = host->early_status.rx_dma;
930 host->early_status.len = statlen;
931
932 if (host->dma_dev)
933 dma_sync_single_for_device(host->dma_dev,
934 host->data_dma, sizeof(*scratch),
935 DMA_BIDIRECTIONAL);
936
937 tmp = spi_sync(spi, &host->m);
David Brownell15a05802007-08-08 09:12:54 -0700938
939 if (host->dma_dev)
940 dma_sync_single_for_cpu(host->dma_dev,
941 host->data_dma, sizeof(*scratch),
942 DMA_BIDIRECTIONAL);
943
944 if (tmp < 0) {
945 if (!data->error)
946 data->error = tmp;
947 return;
948 }
949
950 /* Ideally we collected "not busy" status with one I/O,
951 * avoiding wasteful byte-at-a-time scanning... but more
952 * I/O is often needed.
953 */
954 for (tmp = 2; tmp < statlen; tmp++) {
955 if (scratch->status[tmp] != 0)
956 return;
957 }
Matthew Fleming162350e2008-10-02 12:21:42 +0100958 tmp = mmc_spi_wait_unbusy(host, timeout);
David Brownell15a05802007-08-08 09:12:54 -0700959 if (tmp < 0 && !data->error)
960 data->error = tmp;
961 }
962}
963
964/****************************************************************************/
965
966/*
967 * MMC driver implementation -- the interface to the MMC stack
968 */
969
970static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
971{
972 struct mmc_spi_host *host = mmc_priv(mmc);
973 int status = -EINVAL;
974
975#ifdef DEBUG
976 /* MMC core and layered drivers *MUST* issue SPI-aware commands */
977 {
978 struct mmc_command *cmd;
979 int invalid = 0;
980
981 cmd = mrq->cmd;
982 if (!mmc_spi_resp_type(cmd)) {
983 dev_dbg(&host->spi->dev, "bogus command\n");
984 cmd->error = -EINVAL;
985 invalid = 1;
986 }
987
988 cmd = mrq->stop;
989 if (cmd && !mmc_spi_resp_type(cmd)) {
990 dev_dbg(&host->spi->dev, "bogus STOP command\n");
991 cmd->error = -EINVAL;
992 invalid = 1;
993 }
994
995 if (invalid) {
996 dump_stack();
997 mmc_request_done(host->mmc, mrq);
998 return;
999 }
1000 }
1001#endif
1002
1003 /* issue command; then optionally data and stop */
1004 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
1005 if (status == 0 && mrq->data) {
1006 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
1007 if (mrq->stop)
1008 status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
1009 else
1010 mmc_cs_off(host);
1011 }
1012
1013 mmc_request_done(host->mmc, mrq);
1014}
1015
1016/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
1017 *
1018 * NOTE that here we can't know that the card has just been powered up;
1019 * not all MMC/SD sockets support power switching.
1020 *
1021 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
1022 * this doesn't seem to do the right thing at all...
1023 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, r1b_timeout);
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high. We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol. Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 */
	host->spi->mode |= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode &= ~SPI_CS_HIGH;
	} else {
		/* 18 bytes = 144 clocks, comfortably above the 74 required */
		mmc_spi_readbytes(host, 18);

		/* restore the normal (active-low) chipselect polarity */
		host->spi->mode &= ~SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* Wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}
1063
1064static char *mmc_powerstring(u8 power_mode)
1065{
1066 switch (power_mode) {
1067 case MMC_POWER_OFF: return "off";
1068 case MMC_POWER_UP: return "up";
1069 case MMC_POWER_ON: return "on";
1070 }
1071 return "?";
1072}
1073
/*
 * Apply power-mode and clock changes requested by the MMC core.
 * Power switching depends on a platform setpower() hook; clock changes
 * are pushed to the SPI controller via spi_setup().
 */
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int canpower;

		/* power switching is possible only with a platform hook */
		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				/* powerup_msecs was clamped to <= 250 at probe */
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1msec.
		 *
		 *   - Clock low means CPOL 0, e.g. mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			/* drop to mode 0 so SCK idles low */
			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			/* clock out one 0x00 byte so MOSI ends up low */
			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/*
			 * Now clock should be low due to spi mode 0;
			 * MOSI should be low because of written 0x00;
			 * chipselect should be low (it is active low)
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
			msleep(10);
			/* restore mode 3 only if the mode-0 switch worked */
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3"
						" failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	/* retarget the SPI clock; ios->clock == 0 means "leave it alone" */
	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev,
			"mmc_spi: clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}
1162
1163static int mmc_spi_get_ro(struct mmc_host *mmc)
1164{
1165 struct mmc_spi_host *host = mmc_priv(mmc);
1166
1167 if (host->pdata && host->pdata->get_ro)
Anton Vorontsov08f80bb2008-06-17 18:17:39 +04001168 return !!host->pdata->get_ro(mmc->parent);
1169 /*
1170 * Board doesn't support read only detection; let the mmc core
1171 * decide what to do.
1172 */
1173 return -ENOSYS;
David Brownell15a05802007-08-08 09:12:54 -07001174}
1175
Anton Vorontsov619ef4b2008-06-17 18:17:21 +04001176static int mmc_spi_get_cd(struct mmc_host *mmc)
1177{
1178 struct mmc_spi_host *host = mmc_priv(mmc);
1179
1180 if (host->pdata && host->pdata->get_cd)
1181 return !!host->pdata->get_cd(mmc->parent);
1182 return -ENOSYS;
1183}
David Brownell15a05802007-08-08 09:12:54 -07001184
/* Host operations handed to the MMC core via mmc->ops. */
static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_spi_get_ro,
	.get_cd		= mmc_spi_get_cd,
};
1191
1192
1193/****************************************************************************/
1194
1195/*
1196 * SPI driver implementation
1197 */
1198
1199static irqreturn_t
1200mmc_spi_detect_irq(int irq, void *mmc)
1201{
1202 struct mmc_spi_host *host = mmc_priv(mmc);
1203 u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
1204
1205 mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
1206 return IRQ_HANDLED;
1207}
1208
/* Accumulator for maybe_count_child(): counts children of one parent
 * that sit on @bus, so probe() can verify the SPI bus is unshared.
 */
struct count_children {
	unsigned	n;
	struct bus_type	*bus;
};
1213
1214static int maybe_count_child(struct device *dev, void *c)
1215{
1216 struct count_children *ccp = c;
1217
1218 if (dev->bus == ccp->bus) {
1219 if (ccp->n)
1220 return -EBUSY;
1221 ccp->n++;
1222 }
1223 return 0;
1224}
1225
David Brownell15a05802007-08-08 09:12:54 -07001226static int mmc_spi_probe(struct spi_device *spi)
1227{
1228 void *ones;
1229 struct mmc_host *mmc;
1230 struct mmc_spi_host *host;
1231 int status;
1232
1233 /* MMC and SD specs only seem to care that sampling is on the
1234 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
Wolfgang Muees48881ca2009-03-11 14:13:15 +01001235 * should be legit. We'll use mode 0 since the steady state is 0,
1236 * which is appropriate for hotplugging, unless the platform data
1237 * specify mode 3 (if hardware is not compatible to mode 0).
David Brownell15a05802007-08-08 09:12:54 -07001238 */
Wolfgang Muees48881ca2009-03-11 14:13:15 +01001239 if (spi->mode != SPI_MODE_3)
1240 spi->mode = SPI_MODE_0;
David Brownell15a05802007-08-08 09:12:54 -07001241 spi->bits_per_word = 8;
1242
1243 status = spi_setup(spi);
1244 if (status < 0) {
1245 dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1246 spi->mode, spi->max_speed_hz / 1000,
1247 status);
1248 return status;
1249 }
1250
David Brownell460cd052007-10-27 14:47:20 +02001251 /* We can use the bus safely iff nobody else will interfere with us.
1252 * Most commands consist of one SPI message to issue a command, then
1253 * several more to collect its response, then possibly more for data
1254 * transfer. Clocking access to other devices during that period will
1255 * corrupt the command execution.
1256 *
1257 * Until we have software primitives which guarantee non-interference,
1258 * we'll aim for a hardware-level guarantee.
1259 *
1260 * REVISIT we can't guarantee another device won't be added later...
David Brownell15a05802007-08-08 09:12:54 -07001261 */
1262 if (spi->master->num_chipselect > 1) {
David Brownell460cd052007-10-27 14:47:20 +02001263 struct count_children cc;
David Brownell15a05802007-08-08 09:12:54 -07001264
David Brownell460cd052007-10-27 14:47:20 +02001265 cc.n = 0;
1266 cc.bus = spi->dev.bus;
1267 status = device_for_each_child(spi->dev.parent, &cc,
1268 maybe_count_child);
David Brownell15a05802007-08-08 09:12:54 -07001269 if (status < 0) {
1270 dev_err(&spi->dev, "can't share SPI bus\n");
1271 return status;
1272 }
1273
David Brownell460cd052007-10-27 14:47:20 +02001274 dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
David Brownell15a05802007-08-08 09:12:54 -07001275 }
1276
1277 /* We need a supply of ones to transmit. This is the only time
1278 * the CPU touches these, so cache coherency isn't a concern.
1279 *
1280 * NOTE if many systems use more than one MMC-over-SPI connector
1281 * it'd save some memory to share this. That's evidently rare.
1282 */
1283 status = -ENOMEM;
1284 ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1285 if (!ones)
1286 goto nomem;
1287 memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1288
1289 mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1290 if (!mmc)
1291 goto nomem;
1292
1293 mmc->ops = &mmc_spi_ops;
1294 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1295
Pierre Ossman23af6032008-07-06 01:10:27 +02001296 mmc->caps = MMC_CAP_SPI;
David Brownell15a05802007-08-08 09:12:54 -07001297
1298 /* SPI doesn't need the lowspeed device identification thing for
1299 * MMC or SD cards, since it never comes up in open drain mode.
1300 * That's good; some SPI masters can't handle very low speeds!
1301 *
1302 * However, low speed SDIO cards need not handle over 400 KHz;
1303 * that's the only reason not to use a few MHz for f_min (until
1304 * the upper layer reads the target frequency from the CSD).
1305 */
1306 mmc->f_min = 400000;
1307 mmc->f_max = spi->max_speed_hz;
1308
1309 host = mmc_priv(mmc);
1310 host->mmc = mmc;
1311 host->spi = spi;
1312
1313 host->ones = ones;
1314
1315 /* Platform data is used to hook up things like card sensing
1316 * and power switching gpios.
1317 */
Anton Vorontsov9c43df52008-12-30 18:15:28 +03001318 host->pdata = mmc_spi_get_pdata(spi);
David Brownell15a05802007-08-08 09:12:54 -07001319 if (host->pdata)
1320 mmc->ocr_avail = host->pdata->ocr_mask;
1321 if (!mmc->ocr_avail) {
1322 dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1323 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1324 }
1325 if (host->pdata && host->pdata->setpower) {
1326 host->powerup_msecs = host->pdata->powerup_msecs;
1327 if (!host->powerup_msecs || host->powerup_msecs > 250)
1328 host->powerup_msecs = 250;
1329 }
1330
1331 dev_set_drvdata(&spi->dev, mmc);
1332
1333 /* preallocate dma buffers */
1334 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1335 if (!host->data)
1336 goto fail_nobuf1;
1337
Tony Jones49dce682007-10-16 01:27:48 -07001338 if (spi->master->dev.parent->dma_mask) {
1339 struct device *dev = spi->master->dev.parent;
David Brownell15a05802007-08-08 09:12:54 -07001340
1341 host->dma_dev = dev;
1342 host->ones_dma = dma_map_single(dev, ones,
1343 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1344 host->data_dma = dma_map_single(dev, host->data,
1345 sizeof(*host->data), DMA_BIDIRECTIONAL);
1346
1347 /* REVISIT in theory those map operations can fail... */
1348
1349 dma_sync_single_for_cpu(host->dma_dev,
1350 host->data_dma, sizeof(*host->data),
1351 DMA_BIDIRECTIONAL);
1352 }
1353
1354 /* setup message for status/busy readback */
1355 spi_message_init(&host->readback);
1356 host->readback.is_dma_mapped = (host->dma_dev != NULL);
1357
1358 spi_message_add_tail(&host->status, &host->readback);
1359 host->status.tx_buf = host->ones;
1360 host->status.tx_dma = host->ones_dma;
1361 host->status.rx_buf = &host->data->status;
1362 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1363 host->status.cs_change = 1;
1364
1365 /* register card detect irq */
1366 if (host->pdata && host->pdata->init) {
1367 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1368 if (status != 0)
1369 goto fail_glue_init;
1370 }
1371
Anton Vorontsov619ef4b2008-06-17 18:17:21 +04001372 /* pass platform capabilities, if any */
1373 if (host->pdata)
1374 mmc->caps |= host->pdata->caps;
1375
David Brownell15a05802007-08-08 09:12:54 -07001376 status = mmc_add_host(mmc);
1377 if (status != 0)
1378 goto fail_add_host;
1379
Anton Vorontsov619ef4b2008-06-17 18:17:21 +04001380 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
Kay Sieversd1b26862008-11-08 21:37:46 +01001381 dev_name(&mmc->class_dev),
David Brownell15a05802007-08-08 09:12:54 -07001382 host->dma_dev ? "" : ", no DMA",
1383 (host->pdata && host->pdata->get_ro)
1384 ? "" : ", no WP",
1385 (host->pdata && host->pdata->setpower)
Anton Vorontsov619ef4b2008-06-17 18:17:21 +04001386 ? "" : ", no poweroff",
1387 (mmc->caps & MMC_CAP_NEEDS_POLL)
1388 ? ", cd polling" : "");
David Brownell15a05802007-08-08 09:12:54 -07001389 return 0;
1390
1391fail_add_host:
1392 mmc_remove_host (mmc);
1393fail_glue_init:
1394 if (host->dma_dev)
1395 dma_unmap_single(host->dma_dev, host->data_dma,
1396 sizeof(*host->data), DMA_BIDIRECTIONAL);
1397 kfree(host->data);
1398
1399fail_nobuf1:
1400 mmc_free_host(mmc);
Anton Vorontsov9c43df52008-12-30 18:15:28 +03001401 mmc_spi_put_pdata(spi);
David Brownell15a05802007-08-08 09:12:54 -07001402 dev_set_drvdata(&spi->dev, NULL);
1403
1404nomem:
1405 kfree(ones);
1406 return status;
1407}
1408
1409
1410static int __devexit mmc_spi_remove(struct spi_device *spi)
1411{
1412 struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
1413 struct mmc_spi_host *host;
1414
1415 if (mmc) {
1416 host = mmc_priv(mmc);
1417
1418 /* prevent new mmc_detect_change() calls */
1419 if (host->pdata && host->pdata->exit)
1420 host->pdata->exit(&spi->dev, mmc);
1421
1422 mmc_remove_host(mmc);
1423
1424 if (host->dma_dev) {
1425 dma_unmap_single(host->dma_dev, host->ones_dma,
1426 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1427 dma_unmap_single(host->dma_dev, host->data_dma,
1428 sizeof(*host->data), DMA_BIDIRECTIONAL);
1429 }
1430
1431 kfree(host->data);
1432 kfree(host->ones);
1433
1434 spi->max_speed_hz = mmc->f_max;
1435 mmc_free_host(mmc);
Anton Vorontsov9c43df52008-12-30 18:15:28 +03001436 mmc_spi_put_pdata(spi);
David Brownell15a05802007-08-08 09:12:54 -07001437 dev_set_drvdata(&spi->dev, NULL);
1438 }
1439 return 0;
1440}
1441
1442
/* SPI protocol driver glue: one instance per "mmc_spi" modalias match. */
static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.bus =		&spi_bus_type,
		.owner =	THIS_MODULE,
	},
	.probe =	mmc_spi_probe,
	.remove =	__devexit_p(mmc_spi_remove),
};
1452
1453
/* Module load: register the protocol driver with the SPI core. */
static int __init mmc_spi_init(void)
{
	return spi_register_driver(&mmc_spi_driver);
}
module_init(mmc_spi_init);
1459
1460
/* Module unload: unregister the protocol driver. */
static void __exit mmc_spi_exit(void)
{
	spi_unregister_driver(&mmc_spi_driver);
}
module_exit(mmc_spi_exit);
1466
1467
1468MODULE_AUTHOR("Mike Lavender, David Brownell, "
1469 "Hans-Peter Nilsson, Jan Nikitenko");
1470MODULE_DESCRIPTION("SPI SD/MMC host driver");
1471MODULE_LICENSE("GPL");