blob: 8e7b7ba341dc1e05528fedef05df7e5cb9af756f [file] [log] [blame]
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +08001/*
2 * SD/MMC Greybus driver.
3 *
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +01004 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +08006 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010011#include <linux/mmc/core.h>
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080012#include <linux/mmc/host.h>
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010013#include <linux/mmc/mmc.h>
14#include <linux/scatterlist.h>
15#include <linux/workqueue.h>
Alex Eldere1e9dbd2014-10-01 21:54:11 -050016
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080017#include "greybus.h"
18
/* Per-connection state for one greybus SDIO host. */
struct gb_sdio_host {
	struct gb_connection *connection;	/* greybus connection to the module */
	u8 version_major;			/* negotiated protocol version */
	u8 version_minor;
	struct mmc_host *mmc;			/* mmc-core host backing this driver */
	struct mmc_request *mrq;		/* request currently owned by mrqwork */
	struct mutex lock; /* lock for this host */
	size_t data_max;			/* max data payload per transfer op */
	void *xfer_buffer;			/* bounce buffer, payload-max sized */
	spinlock_t xfer; /* lock to cancel ongoing transfer */
	bool xfer_stop;				/* stop-transmission was requested */
	struct work_struct mrqwork;		/* deferred mmc request processing */
	u8 queued_events;			/* events held back while removed */
	bool removed;				/* host not (yet) registered with core */
	bool card_present;
	bool read_only;				/* latched once a WP event is seen */
};
36
/* Single-threaded workqueue on which queued mmc requests are executed. */
static struct workqueue_struct *gb_sdio_mrq_workqueue;

/* Define get_version() routine */
define_get_version(gb_sdio_host, SDIO);

/* Greybus response-flag combinations mirroring the native MMC_RSP_* sets. */
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
49
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010050static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080051{
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010052 u32 caps = 0;
53 u32 caps2 = 0;
54
55 caps = (r & GB_SDIO_CAP_NONREMOVABLE ? MMC_CAP_NONREMOVABLE : 0) |
56 (r & GB_SDIO_CAP_4_BIT_DATA ? MMC_CAP_4_BIT_DATA : 0) |
57 (r & GB_SDIO_CAP_8_BIT_DATA ? MMC_CAP_8_BIT_DATA : 0) |
58 (r & GB_SDIO_CAP_MMC_HS ? MMC_CAP_MMC_HIGHSPEED : 0) |
59 (r & GB_SDIO_CAP_SD_HS ? MMC_CAP_SD_HIGHSPEED : 0) |
60 (r & GB_SDIO_CAP_ERASE ? MMC_CAP_ERASE : 0) |
61 (r & GB_SDIO_CAP_1_2V_DDR ? MMC_CAP_1_2V_DDR : 0) |
62 (r & GB_SDIO_CAP_1_8V_DDR ? MMC_CAP_1_8V_DDR : 0) |
63 (r & GB_SDIO_CAP_POWER_OFF_CARD ? MMC_CAP_POWER_OFF_CARD : 0) |
64 (r & GB_SDIO_CAP_UHS_SDR12 ? MMC_CAP_UHS_SDR12 : 0) |
65 (r & GB_SDIO_CAP_UHS_SDR25 ? MMC_CAP_UHS_SDR25 : 0) |
66 (r & GB_SDIO_CAP_UHS_SDR50 ? MMC_CAP_UHS_SDR50 : 0) |
67 (r & GB_SDIO_CAP_UHS_SDR104 ? MMC_CAP_UHS_SDR104 : 0) |
68 (r & GB_SDIO_CAP_UHS_DDR50 ? MMC_CAP_UHS_DDR50 : 0) |
69 (r & GB_SDIO_CAP_DRIVER_TYPE_A ? MMC_CAP_DRIVER_TYPE_A : 0) |
70 (r & GB_SDIO_CAP_DRIVER_TYPE_C ? MMC_CAP_DRIVER_TYPE_C : 0) |
71 (r & GB_SDIO_CAP_DRIVER_TYPE_D ? MMC_CAP_DRIVER_TYPE_D : 0);
72
73 caps2 = (r & GB_SDIO_CAP_HS200_1_2V ? MMC_CAP2_HS200_1_2V_SDR : 0) |
Rui Miguel Silva5656ab92015-06-24 23:20:26 +010074#ifdef MMC_HS400_SUPPORTED
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010075 (r & GB_SDIO_CAP_HS400_1_2V ? MMC_CAP2_HS400_1_2V : 0) |
Rui Miguel Silva5656ab92015-06-24 23:20:26 +010076 (r & GB_SDIO_CAP_HS400_1_8V ? MMC_CAP2_HS400_1_8V : 0) |
77#endif
78 (r & GB_SDIO_CAP_HS200_1_8V ? MMC_CAP2_HS200_1_8V_SDR : 0);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010079
80 host->mmc->caps = caps;
81 host->mmc->caps2 = caps2;
82
83 if (caps & MMC_CAP_NONREMOVABLE)
84 host->card_present = true;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080085}
86
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010087static int gb_sdio_get_caps(struct gb_sdio_host *host)
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080088{
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010089 struct gb_sdio_get_caps_response response;
90 struct mmc_host *mmc = host->mmc;
91 u16 data_max;
92 u32 blksz;
93 u32 r;
94 int ret;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +080095
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +010096 ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
97 NULL, 0, &response, sizeof(response));
98 if (ret < 0)
99 return ret;
100 r = le32_to_cpu(response.caps);
101
102 _gb_sdio_set_host_caps(host, r);
103
104 /* get the max block size that could fit our payload */
105 data_max = gb_operation_get_payload_size_max(host->connection);
106 data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
107 data_max - sizeof(struct gb_sdio_transfer_response));
108
109 blksz = min(le16_to_cpu(response.max_blk_size), data_max);
110 blksz = max_t(u32, 512, blksz);
111
112 mmc->max_blk_size = rounddown_pow_of_two(blksz);
113 mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
114 host->data_max = data_max;
115
116 /* get ocr supported values */
117 mmc->ocr_avail = le32_to_cpu(response.ocr);
118 mmc->ocr_avail_sdio = mmc->ocr_avail;
119 mmc->ocr_avail_sd = mmc->ocr_avail;
120 mmc->ocr_avail_mmc = mmc->ocr_avail;
121
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800122 return 0;
123}
124
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100125static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
126{
127 if (event & GB_SDIO_CARD_INSERTED)
128 host->queued_events &= ~GB_SDIO_CARD_REMOVED;
129 else if (event & GB_SDIO_CARD_REMOVED)
130 host->queued_events &= ~GB_SDIO_CARD_INSERTED;
131
132 host->queued_events |= event;
133}
134
135static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
136{
137 u8 state_changed = 0;
138
139 if (event & GB_SDIO_CARD_INSERTED) {
140 if (!mmc_card_is_removable(host->mmc))
141 return 0;
142 if (host->card_present)
143 return 0;
144 host->card_present = true;
145 state_changed = 1;
146 }
147
148 if (event & GB_SDIO_CARD_REMOVED) {
149 if (!mmc_card_is_removable(host->mmc))
150 return 0;
151 if (!(host->card_present))
152 return 0;
153 host->card_present = false;
154 state_changed = 1;
155 }
156
157 if (event & GB_SDIO_WP) {
158 host->read_only = true;
159 }
160
161 if (state_changed) {
162 dev_info(mmc_dev(host->mmc), "card %s now event\n",
163 (host->card_present ? "inserted" : "removed"));
164 mmc_detect_change(host->mmc, 0);
165 }
166
167 return 0;
168}
169
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100170static int gb_sdio_event_recv(u8 type, struct gb_operation *op)
171{
172 struct gb_connection *connection = op->connection;
173 struct gb_sdio_host *host = connection->private;
174 struct gb_message *request;
175 struct gb_sdio_event_request *payload;
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100176 int ret = 0;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100177 u8 event;
178
179 if (type != GB_SDIO_TYPE_EVENT) {
180 dev_err(&connection->dev,
181 "unsupported unsolicited event: %u\n", type);
182 return -EINVAL;
183 }
184
185 request = op->request;
186
187 if (request->payload_size != sizeof(*payload)) {
188 dev_err(mmc_dev(host->mmc), "wrong event size received\n");
189 return -EINVAL;
190 }
191
192 payload = request->payload;
193 event = payload->event;
194
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100195 if (host->removed)
196 _gb_queue_event(host, event);
197 else
198 ret = _gb_sdio_process_events(host, event);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100199
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100200 return ret;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100201}
202
/*
 * Forward an already-packed set_ios request to the module.  Returns the
 * gb_operation_sync() result: 0 on success, negative errno on failure.
 */
static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	return gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS,
				 request, sizeof(*request), NULL, 0);
}
209
210static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
211 size_t len, u16 nblocks, off_t skip)
212{
213 struct gb_sdio_transfer_request *request;
214 struct gb_sdio_transfer_response response;
215 struct scatterlist *sg = data->sg;
216 unsigned int sg_len = data->sg_len;
217 size_t copied;
218 u16 send_blksz;
219 u16 send_blocks;
220 int ret;
221
222 WARN_ON(len > host->data_max);
223
224 request = host->xfer_buffer;
225 request->data_flags = (data->flags >> 8);
226 request->data_blocks = cpu_to_le16(nblocks);
227 request->data_blksz = cpu_to_le16(data->blksz);
228
229 copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0] + skip, len,
230 skip);
231
232 if (copied != len)
233 return -EINVAL;
234
235 ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
236 request, len, &response, sizeof(response));
237 if (ret < 0)
238 return ret;
239
240 send_blocks = le16_to_cpu(response.data_blocks);
241 send_blksz = le16_to_cpu(response.data_blksz);
242
243 if (len != send_blksz * send_blocks)
244 return -EINVAL;
245
246 return ret;
247}
248
249static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
250 size_t len, u16 nblocks, off_t skip)
251{
252 struct gb_sdio_transfer_request request;
253 struct gb_sdio_transfer_response *response;
254 struct scatterlist *sg = data->sg;
255 unsigned int sg_len = data->sg_len;
256 size_t copied;
257 u16 recv_blksz;
258 u16 recv_blocks;
259 int ret;
260
261 WARN_ON(len > host->data_max);
262
263 request.data_flags = (data->flags >> 8);
264 request.data_blocks = cpu_to_le16(nblocks);
265 request.data_blksz = cpu_to_le16(data->blksz);
266
267 response = host->xfer_buffer;
268
269 ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
270 &request, sizeof(request), response, len);
271 if (ret < 0)
272 return ret;
273
274 recv_blocks = le16_to_cpu(response->data_blocks);
275 recv_blksz = le16_to_cpu(response->data_blksz);
276
277 if (len != recv_blksz * recv_blocks)
278 return -EINVAL;
279
280 copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0] + skip,
281 len, skip);
282 if (copied != len)
283 return -EINVAL;
284
285 return 0;
286}
287
Rui Miguel Silva882edf52015-07-02 19:11:34 +0100288static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100289{
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100290 size_t left, len;
291 off_t skip = 0;
292 int ret = 0;
293 u16 nblocks;
294
295 left = data->blksz * data->blocks;
296
297 while (left) {
298 /* check is a stop transmission is pending */
299 spin_lock(&host->xfer);
300 if (host->xfer_stop) {
301 host->xfer_stop = false;
302 spin_unlock(&host->xfer);
303 ret = -EINTR;
304 goto out;
305 }
306 spin_unlock(&host->xfer);
307 len = min(left, host->data_max);
308 nblocks = do_div(len, data->blksz);
309 len = nblocks * data->blksz;
310
311 if (data->flags & MMC_DATA_READ) {
312 ret = _gb_sdio_recv(host, data, len, nblocks, skip);
313 if (ret < 0)
314 goto out;
315 } else {
316 ret = _gb_sdio_send(host, data, len, nblocks, skip);
317 if (ret < 0)
318 goto out;
319 }
320 data->bytes_xfered += len;
321 left -= len;
322 skip += len;
323 }
324
325out:
326 data->error = ret;
327 return ret;
328}
329
330static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
331{
332 struct gb_sdio_command_request request;
333 struct gb_sdio_command_response response;
334 u8 cmd_flags;
335 u8 cmd_type;
336 int i;
337 int ret = 0;
338
339 switch (mmc_resp_type(cmd)) {
340 case MMC_RSP_NONE:
341 cmd_flags = GB_SDIO_RSP_NONE;
342 break;
343 case MMC_RSP_R1:
344 cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
345 break;
346 case MMC_RSP_R1B:
347 cmd_flags = GB_SDIO_RSP_R1B;
348 break;
349 case MMC_RSP_R2:
350 cmd_flags = GB_SDIO_RSP_R2;
351 break;
352 case MMC_RSP_R3:
353 cmd_flags = GB_SDIO_RSP_R3_R4;
354 default:
355 dev_err(mmc_dev(host->mmc), "cmd flag invalid %04x\n",
356 mmc_resp_type(cmd));
357 ret = -EINVAL;
358 goto out;
359 }
360
361 switch (mmc_cmd_type(cmd)) {
362 case MMC_CMD_BC:
363 cmd_type = GB_SDIO_CMD_BC;
364 break;
365 case MMC_CMD_BCR:
366 cmd_type = GB_SDIO_CMD_BCR;
367 break;
368 case MMC_CMD_AC:
369 cmd_type = GB_SDIO_CMD_AC;
370 break;
371 case MMC_CMD_ADTC:
372 cmd_type = GB_SDIO_CMD_ADTC;
373 break;
374 default:
375 dev_err(mmc_dev(host->mmc), "cmd type invalid %04x\n",
376 mmc_cmd_type(cmd));
377 ret = -EINVAL;
378 goto out;
379 }
380
381 request.cmd = cmd->opcode;
382 request.cmd_flags = cmd_flags;
383 request.cmd_type = cmd_type;
384 request.cmd_arg = cpu_to_le32(cmd->arg);
385
386 ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
387 &request, sizeof(request), &response,
388 sizeof(response));
389 if (ret < 0)
390 goto out;
391
392 /* no response expected */
393 if (cmd_flags & GB_SDIO_RSP_NONE)
394 goto out;
395
396 /* long response expected */
397 if (cmd_flags & GB_SDIO_RSP_R2)
398 for (i = 0; i < 4; i++)
399 cmd->resp[i] = le32_to_cpu(response.resp[i]);
400 else
401 cmd->resp[0] = le32_to_cpu(response.resp[0]);
402
403out:
404 cmd->error = ret;
405 return ret;
406}
407
408static void gb_sdio_mrq_work(struct work_struct *work)
409{
410 struct gb_sdio_host *host;
411 struct mmc_request *mrq;
412 int ret;
413
414 host = container_of(work, struct gb_sdio_host, mrqwork);
415
416 mutex_lock(&host->lock);
Phong Tran93a99e82015-06-26 21:05:13 +0700417 mrq = host->mrq;
418 if (!mrq) {
419 mutex_unlock(&host->lock);
420 dev_err(mmc_dev(host->mmc), "mmc request is NULL");
421 return;
422 }
423
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100424 if (host->removed) {
425 mrq->cmd->error = -ESHUTDOWN;
426 goto done;
427 }
428
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100429 if (mrq->sbc) {
430 ret = gb_sdio_command(host, mrq->sbc);
431 if (ret < 0)
432 goto done;
433 }
434
435 ret = gb_sdio_command(host, mrq->cmd);
436 if (ret < 0)
437 goto done;
438
439 if (mrq->data) {
Rui Miguel Silva882edf52015-07-02 19:11:34 +0100440 ret = gb_sdio_transfer(host, host->mrq->data);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100441 if (ret < 0)
442 goto done;
443 }
444
Rui Miguel Silva7a5cd5a2015-07-02 19:11:33 +0100445 if (mrq->stop) {
446 ret = gb_sdio_command(host, mrq->stop);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100447 if (ret < 0)
448 goto done;
449 }
450
451done:
Phong Tran93a99e82015-06-26 21:05:13 +0700452 host->mrq = NULL;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100453 mutex_unlock(&host->lock);
454 mmc_request_done(host->mmc, mrq);
455}
456
/*
 * mmc_host_ops .request handler: validate host state and hand the request
 * off to the mrq workqueue.  A MMC_STOP_TRANSMISSION opcode additionally
 * raises xfer_stop so an in-flight chunked transfer can abort early.
 * On the error paths the request is completed immediately.
 */
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel to ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	/* only one request may be outstanding at a time */
	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(gb_sdio_mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
493
/*
 * mmc_host_ops .set_ios handler: translate the mmc core's ios settings
 * (clock, vdd, bus mode/width, power mode, timing, signal voltage, driver
 * type) into a greybus set_ios request and send it to the module.  On
 * success the applied settings are mirrored into mmc->ios.  Unmappable
 * values fall back to each switch's default case.
 */
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);
	/* ios->vdd is a bit *index*; the protocol carries the bit mask */
	request.vdd = cpu_to_le32(1 << ios->vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
#ifdef MMC_POWER_UNDEFINED_SUPPORTED
	/* only newer kernels define MMC_POWER_UNDEFINED */
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
#endif
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
#ifdef MMC_DDR52_DEFINED
	/* DDR52 timing only exists on kernels that define this symbol */
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
#endif
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
#ifdef MMC_HS400_SUPPORTED
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
#endif
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	/* remember what was actually applied */
	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
628
629static int gb_mmc_get_ro(struct mmc_host *mmc)
630{
631 struct gb_sdio_host *host = mmc_priv(mmc);
632
633 mutex_lock(&host->lock);
634 if (host->removed)
635 return -ESHUTDOWN;
636 mutex_unlock(&host->lock);
Rui Miguel Silva08ccc9b2015-07-02 19:11:32 +0100637 return host->read_only;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100638}
639
640static int gb_mmc_get_cd(struct mmc_host *mmc)
641{
642 struct gb_sdio_host *host = mmc_priv(mmc);
643
644 mutex_lock(&host->lock);
645 if (host->removed)
646 return -ESHUTDOWN;
647 mutex_unlock(&host->lock);
Rui Miguel Silva08ccc9b2015-07-02 19:11:32 +0100648 return host->card_present;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100649}
650
/* mmc core callbacks implemented by this driver */
static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
};
657
Greg Kroah-Hartmana2f47632014-10-28 10:17:09 +0800658static int gb_sdio_connection_init(struct gb_connection *connection)
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800659{
660 struct mmc_host *mmc;
Greg Kroah-Hartman199d68d2014-08-30 16:20:22 -0700661 struct gb_sdio_host *host;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100662 size_t max_buffer;
663 int ret = 0;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800664
Viresh Kumar64e69292014-11-19 17:24:58 +0530665 mmc = mmc_alloc_host(sizeof(*host), &connection->dev);
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800666 if (!mmc)
667 return -ENOMEM;
668
669 host = mmc_priv(mmc);
670 host->mmc = mmc;
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100671 host->removed = true;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800672
Greg Kroah-Hartmana2f47632014-10-28 10:17:09 +0800673 host->connection = connection;
674 connection->private = host;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100675
676 ret = get_version(host);
677 if (ret < 0)
678 goto free_mmc;
679
680 ret = gb_sdio_get_caps(host);
681 if (ret < 0)
682 goto free_mmc;
683
684 mmc->ops = &gb_sdio_ops;
685
686 /* for now we just make a map 1:1 between max blocks and segments */
687 mmc->max_segs = host->mmc->max_blk_count;
688 mmc->max_seg_size = host->mmc->max_blk_size;
689
690 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
691
692 max_buffer = gb_operation_get_payload_size_max(host->connection);
693 host->xfer_buffer = kzalloc(max_buffer, GFP_KERNEL);
694 if (!host->xfer_buffer) {
695 ret = -ENOMEM;
696 goto free_mmc;
697 }
698 mutex_init(&host->lock);
699 spin_lock_init(&host->xfer);
700 gb_sdio_mrq_workqueue = alloc_workqueue("gb_sdio_mrq", 0, 1);
701 INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
702
703 ret = mmc_add_host(mmc);
704 if (ret < 0)
705 goto free_work;
Rui Miguel Silvac36d31c2015-07-02 19:11:31 +0100706 host->removed = false;
707 ret = _gb_sdio_process_events(host, host->queued_events);
708 host->queued_events = 0;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100709
710 return ret;
711
712free_work:
713 destroy_workqueue(gb_sdio_mrq_workqueue);
714 kfree(host->xfer_buffer);
715
716free_mmc:
717 connection->private = NULL;
718 mmc_free_host(mmc);
719
720 return ret;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800721}
722
Greg Kroah-Hartmana2f47632014-10-28 10:17:09 +0800723static void gb_sdio_connection_exit(struct gb_connection *connection)
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800724{
725 struct mmc_host *mmc;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100726 struct gb_sdio_host *host = connection->private;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800727
Alex Elder051fb042014-10-16 06:35:24 -0500728 if (!host)
729 return;
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800730
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100731 mutex_lock(&host->lock);
732 host->removed = true;
Alex Elder051fb042014-10-16 06:35:24 -0500733 mmc = host->mmc;
Greg Kroah-Hartmana2f47632014-10-28 10:17:09 +0800734 connection->private = NULL;
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100735 mutex_unlock(&host->lock);
736
737 flush_workqueue(gb_sdio_mrq_workqueue);
738 destroy_workqueue(gb_sdio_mrq_workqueue);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100739 mmc_remove_host(mmc);
Phong Tran9b86bdf2015-06-26 21:05:12 +0700740 mmc_free_host(mmc);
Rui Miguel Silva3b6ecd62015-06-22 14:03:52 +0100741 kfree(host->xfer_buffer);
Greg Kroah-Hartman83ddaaa2014-08-11 17:27:22 +0800742}
743
/* greybus protocol descriptor: binds SDIO CPort connections to this driver */
static struct gb_protocol sdio_protocol = {
	.name			= "sdio",
	.id			= GREYBUS_PROTOCOL_SDIO,
	.major			= GB_SDIO_VERSION_MAJOR,
	.minor			= GB_SDIO_VERSION_MINOR,
	.connection_init	= gb_sdio_connection_init,
	.connection_exit	= gb_sdio_connection_exit,
	.request_recv		= gb_sdio_event_recv,
};

/* registers the protocol with greybus core at module init/exit */
gb_builtin_protocol_driver(sdio_protocol);