/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/reboot.h>
#include <trace/events/mmc.h>

#include "core.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"

#define DEFAULT_CMD6_TIMEOUT_MS	500

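/*
 * The TRAN_SPEED and TAAC fields of the CSD encode a value as a mantissa and
 * an exponent.  The mantissa tables below are scaled by 10 (1.0 -> 10,
 * 1.2 -> 12, ...); mmc_decode_csd() compensates for that factor when it
 * combines them with the exponent tables.
 */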
static const unsigned int tran_exp[] = {
	10000, 100000, 1000000, 10000000,
	0, 0, 0, 0
};

static const unsigned char tran_mant[] = {
	0, 10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

static const unsigned int tacc_exp[] = {
	1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0, 10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

static const struct mmc_fixup mmc_ext_csd_fixups[] = {
	/*
	 * Certain Hynix eMMC 4.41 cards can misbehave when the HPI feature
	 * is used, so disable HPI for such buggy cards.
	 */
	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
			      0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),

	END_FIXUP
};

#define UNSTUFF_BITS(resp,start,size) \
	({ \
		const int __size = size; \
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
		const int __off = 3 - ((start) / 32); \
		const int __shft = (start) & 31; \
		u32 __res; \
		\
		__res = resp[__off] >> __shft; \
		if (__size + __shft > 32) \
			__res |= resp[__off-1] << ((32 - __shft) % 32); \
		__res & __mask; \
	})

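/*
 * UNSTUFF_BITS() extracts a bit field from the 128-bit response, which is
 * stored as four 32-bit words with resp[0] holding bits 127:96 and resp[3]
 * holding bits 31:0.  For example, UNSTUFF_BITS(resp, 104, 24) reads the
 * 24-bit field occupying bits 127:104 (the manufacturer ID of a v1 CID),
 * and fields that straddle a word boundary are stitched together from the
 * neighbouring word.
 */
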
static int mmc_switch_status(struct mmc_card *card, bool ignore_crc);
/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}

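/*
 * EXT_CSD_ERASE_GROUP_DEF bit 0 selects whether the card uses the
 * high-capacity erase unit advertised in the EXT_CSD or the legacy erase
 * group size computed from the CSD; pick the matching erase_size here.
 */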
static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}

static const struct mmc_fixup mmc_fixups[] = {

	/* avoid HPI for specific cards */
	MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
			      add_quirk, MMC_QUIRK_BROKEN_HPI, MMC_V4_41),

	/* Disable cache for specific cards */
	MMC_FIXUP("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_CACHE_DISABLE),

	END_FIXUP
};

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr = tran_exp[e] * tran_mant[m];
	csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

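	/*
	 * Capacity per the CSD: C_SIZE (bits 73:62) and C_SIZE_MULT
	 * (bits 49:47) give (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) blocks of
	 * 2^READ_BL_LEN bytes each.
	 */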
	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}

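/*
 * Intersect the host's capabilities with the device types advertised in
 * EXT_CSD_CARD_TYPE to work out which bus speed modes are usable, and cache
 * the result in card->mmc_avail_type together with the matching maximum
 * clock rates.
 */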
static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
	unsigned int avail_type = 0;

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_26) {
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_26;
	}

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_52) {
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_52;
	}

	if (caps & MMC_CAP_1_8V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
	}

	if (caps & MMC_CAP_1_2V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
	}

	if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
	}

	if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
	}

	if (caps2 & MMC_CAP2_HS400_1_8V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
	}

	if (caps2 & MMC_CAP2_HS400_1_2V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
	}

	if ((caps2 & MMC_CAP2_HS400_ES) &&
	    card->ext_csd.strobe_support &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400))
		avail_type |= EXT_CSD_CARD_TYPE_HS400ES;

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.hs200_max_dtr = hs200_max_dtr;
	card->mmc_avail_type = avail_type;
}

static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
	u8 hc_erase_grp_sz, hc_wp_grp_sz;

	/*
	 * Disable these attributes by default
	 */
	card->ext_csd.enhanced_area_offset = -EINVAL;
	card->ext_csd.enhanced_area_size = -EINVAL;

	/*
	 * Enhanced area feature support -- check whether the eMMC
	 * card has the Enhanced area enabled. If so, export enhanced
	 * area offset and size to user by adding sysfs interface.
	 */
	if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
	    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
		if (card->ext_csd.partition_setting_completed) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(((unsigned long long)ext_csd[139]) << 24) +
				(((unsigned long long)ext_csd[138]) << 16) +
				(((unsigned long long)ext_csd[137]) << 8) +
				(((unsigned long long)ext_csd[136]));
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			pr_warn("%s: defines enhanced area without partition setting complete\n",
				mmc_hostname(card->host));
		}
	}
}

static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
	int idx;
	u8 hc_erase_grp_sz, hc_wp_grp_sz;
	unsigned int part_size;

	/*
	 * General purpose partition feature support --
	 * If ext_csd has the size of general purpose partitions,
	 * set size, part_cfg, partition name in mmc_part.
	 */
	if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
	    EXT_CSD_PART_SUPPORT_PART_EN) {
		hc_erase_grp_sz =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		hc_wp_grp_sz =
			ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
			if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
				continue;
			if (card->ext_csd.partition_setting_completed == 0) {
				pr_warn("%s: has partition size defined without partition complete\n",
					mmc_hostname(card->host));
				break;
			}
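			/*
			 * GP_SIZE_MULT is in units of the write protect
			 * group size (HC_WP_GRP_SIZE x HC_ERASE_GRP_SIZE
			 * x 512 KiB), so the << 19 below converts the
			 * result to bytes.
			 */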
			part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
				<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
				<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
			part_size *= (size_t)(hc_erase_grp_sz *
				hc_wp_grp_sz);
			mmc_part_add(card, part_size << 19,
				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
				"gp%d", idx, false,
				MMC_BLK_DATA_AREA_GP);
		}
	}
}

/* Minimum partition switch timeout in milliseconds */
#define MMC_MIN_PART_SWITCH_TIME	300

/*
 * Decode extended CSD.
 */
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	struct device_node *np;
	bool broken_hpi = false;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	np = mmc_of_find_child_device(card->host, 0);
	if (np && of_device_is_compatible(np, "mmc-card"))
		broken_hpi = of_property_read_bool(np, "broken-hpi");
	of_node_put(np);

	/*
	 * The EXT_CSD format is meant to be forward compatible. As long
	 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
	 * are authorized, see JEDEC JESD84-B50 section B.8.
	 */
	card->ext_csd.rev = ext_csd[EXT_CSD_REV];

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
		/* Some eMMC set the value too low so set a minimum */
		if (card->ext_csd.part_time &&
		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	if (card->ext_csd.rev >= 4) {
		if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
		    EXT_CSD_PART_SETTING_COMPLETED)
			card->ext_csd.partition_setting_completed = 1;
		else
			card->ext_csd.partition_setting_completed = 0;

		mmc_manage_enhanced_area(card, ext_csd);

		mmc_manage_gp_partitions(card, ext_csd);

		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
		card->ext_csd.raw_pwr_cl_ddr_200_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
	}

	/* check whether the eMMC card supports HPI */
	if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
	    !(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
		card->ext_csd.hpi = 1;
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
			card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
		else
			card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
		/*
		 * Indicate the maximum timeout to close
		 * a command interrupted by HPI
		 */
		card->ext_csd.out_of_int_time =
			ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		pr_info("%s: Out-of-interrupt timeout is %d[ms]\n",
			mmc_hostname(card->host),
			card->ext_csd.out_of_int_time);
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (!mmc_card_broken_hpi(card) &&
		    (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
		    card->ext_csd.hpi) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN equals 0x%x\n",
					mmc_hostname(card->host),
					card->ext_csd.bkops_en);
		}

		/* check whether the eMMC card supports HPI */
		if (!mmc_card_broken_hpi(card) &&
		    !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * Some eMMC vendors violate the eMMC 5.0 spec and set
		 * REL_WR_SEC_C to 0x10 to indicate improved RPMB
		 * throughput, which leads to failures when the TZ module
		 * writes data to the RPMB partition. So check bit[4] of
		 * EXT_CSD[166] and, if it is not set, force REL_WR_SEC_C
		 * to 0x1, ignoring the value of EXT_CSD[222].
		 */
		if (!(card->ext_csd.rel_param &
		      EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR))
			card->ext_csd.rel_sectors = 0x1;

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

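		/*
		 * GENERIC_CMD6_TIME and POWER_OFF_LONG_TIME are specified
		 * in 10ms units in the EXT_CSD; store them in ms like the
		 * other timeouts.
		 */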
		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
				((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
				(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

	if (card->ext_csd.rev >= 7) {
		/*
		 * Enhanced Strobe was introduced in eMMC v5.1, whose
		 * EXT_CSD_REV is 8, but some devices report it with rev 7,
		 * so handle Enhanced Strobe here as well.
		 */
		card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
		card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT];
		card->ext_csd.fw_version = ext_csd[EXT_CSD_FIRMWARE_VERSION];
		pr_info("%s: eMMC FW version: 0x%02x\n",
			mmc_hostname(card->host),
			card->ext_csd.fw_version);
		if (card->ext_csd.cmdq_support) {
			/*
			 * Queue Depth = N + 1,
			 * see JEDEC JESD84-B51 section 7.4.19
			 */
			card->ext_csd.cmdq_depth =
				ext_csd[EXT_CSD_CMDQ_DEPTH] + 1;
			pr_info("%s: CMDQ supported: depth: %d\n",
				mmc_hostname(card->host),
				card->ext_csd.cmdq_depth);
		}
		card->ext_csd.barrier_support =
			ext_csd[EXT_CSD_BARRIER_SUPPORT];
		card->ext_csd.cache_flush_policy =
			ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
		pr_info("%s: cache barrier support %d flush policy %d\n",
			mmc_hostname(card->host),
			card->ext_csd.barrier_support,
			card->ext_csd.cache_flush_policy);
		card->ext_csd.enhanced_rpmb_supported =
			(card->ext_csd.rel_param &
			 EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
	} else {
		card->ext_csd.cmdq_support = 0;
		card->ext_csd.cmdq_depth = 0;
		card->ext_csd.barrier_support = 0;
		card->ext_csd.cache_flush_policy = 0;
	}

	/* eMMC v5 or later */
	if (card->ext_csd.rev >= 7) {
		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
		       MMC_FIRMWARE_LEN);
		card->ext_csd.ffu_capable =
			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);

		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
		card->ext_csd.device_life_time_est_typ_a =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
		card->ext_csd.device_life_time_est_typ_b =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
	}
out:
	return err;
}

static int mmc_read_ext_csd(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 *ext_csd;
	int err;

	if (!mmc_can_ext_csd(card))
		return 0;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err) {
		pr_err("%s: %s: mmc_get_ext_csd() fails %d\n",
			mmc_hostname(host), __func__, err);

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		    && (err != -ENOSYS)
		    && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
				mmc_hostname(card->host));
			err = 0;
		}

		return err;
	}

	card->cached_ext_csd = ext_csd;
	err = mmc_decode_ext_csd(card, ext_csd);
	kfree(ext_csd);
	return err;
}

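/*
 * Used by mmc_select_bus_width() on hosts without MMC_CAP_BUS_WIDTH_TEST:
 * the read-only EXT_CSD fields are re-read at the new bus width and compared
 * against the copy obtained in 1-bit mode to confirm that the wider bus
 * actually works.
 */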
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);
	if (err)
		return err;

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

	if (err)
		err = -EINVAL;

	kfree(bw_ext_csd);
	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
	card->ext_csd.device_life_time_est_typ_a,
	card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
	card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);

static ssize_t mmc_fwrev_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	if (card->ext_csd.rev < 7) {
		return sprintf(buf, "0x%x\n", card->cid.fwrev);
	} else {
		return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
			       card->ext_csd.fwrev);
	}
}

static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);

static ssize_t mmc_dsr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_host *host = card->host;

	if (card->csd.dsr_imp && host->dsr_req)
		return sprintf(buf, "0x%x\n", host->dsr);
	else
		/* return default DSR value */
		return sprintf(buf, "0x%x\n", 0x404);
}

static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_ffu_capable.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_rev.attr,
	&dev_attr_pre_eol_info.attr,
	&dev_attr_life_time.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_enhanced_rpmb_supported.attr,
	&dev_attr_rel_sectors.attr,
	&dev_attr_ocr.attr,
	&dev_attr_dsr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

static struct device_type mmc_type = {
	.groups = mmc_std_groups,
};

/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}

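	/*
	 * Each power class byte packs the 8-bit-bus class in its upper
	 * nibble and the 4-bit-bus class in its lower nibble; pick the
	 * nibble that matches the requested bus width.
	 */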
	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}

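/*
 * Map the currently selected ios bus width (and DDR mode) to the
 * corresponding EXT_CSD bus-width code and let __mmc_select_powerclass()
 * program the matching power class.
 */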
static int mmc_select_powerclass(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err, ddr;

	/* Power class selection is supported for versions >= 4.0 */
	if (!mmc_can_ext_csd(card))
		return 0;

	bus_width = host->ios.bus_width;
	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
	if (ddr)
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
	else
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;

	err = __mmc_select_powerclass(card, ext_csd_bits);
	if (err)
		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
			mmc_hostname(host), 1 << bus_width, ddr);

	return err;
}

/*
 * Set the bus speed for the selected speed mode.
 */
static void mmc_set_bus_speed(struct mmc_card *card)
{
	unsigned int max_dtr = (unsigned int)-1;

	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
	     max_dtr > card->ext_csd.hs200_max_dtr)
		max_dtr = card->ext_csd.hs200_max_dtr;
	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
		max_dtr = card->ext_csd.hs_max_dtr;
	else if (max_dtr > card->csd.max_dtr)
		max_dtr = card->csd.max_dtr;

	mmc_set_clock(card->host, max_dtr);
}

/*
 * Select the bus width among 4-bit and 8-bit (SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of error value if the wide width is not supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	static const unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	static const unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	if (!mmc_can_ext_csd(card) ||
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register that
	 * reports the supported bus widths, so either the bus test command
	 * must be run, or the EXT_CSD read at the current bus width must be
	 * compared against the EXT_CSD values read earlier in 1-bit mode.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8-bit transfers, try to switch
		 * the device to 8-bit mode first. If the mmc switch command
		 * returns an error, fall back to 4-bit mode. On success, set
		 * the corresponding bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If controller can't handle bus width test,
		 * compare ext_csd previously read in 1 bit mode
		 * against ext_csd at new bus width
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), 1 << bus_width);
		}
	}

	return err;
}

/* Caller must hold re-tuning */
static int mmc_switch_status(struct mmc_card *card, bool ignore_crc)
{
	u32 status;
	int err;

	err = __mmc_send_status(card, &status, ignore_crc);
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

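/*
 * Note the pattern used by the mode-switch helpers below: the CMD6 switch is
 * issued first, the host is then reprogrammed to the new timing, and only
 * after that is the switch verified with an explicit mmc_switch_status()
 * poll, so that the status read happens at the timing the card now expects.
 */
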
/*
 * Switch to the high-speed mode
 */
static int mmc_select_hs(struct mmc_card *card)
{
	int err;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time,
			   true, false, true);
	if (!err) {
		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		err = mmc_switch_status(card, false);
	}

	if (err)
		pr_warn("%s: switch to high-speed failed, err:%d\n",
			mmc_hostname(card->host), err);

	return err;
}

/*
 * Activate wide bus and DDR if supported.
 */
static int mmc_select_hs_ddr(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err = 0;

	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
		return 0;

	bus_width = host->ios.bus_width;
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BUS_WIDTH,
			   ext_csd_bits,
			   card->ext_csd.generic_cmd6_time,
			   true, false, false);
	if (err) {
		pr_err("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
		return err;
	}

	/*
	 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
	 * signaling.
	 *
	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
	 *
	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
	 * in the JEDEC spec for DDR.
	 *
	 * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
	 * every host controller can, e.g. some SDHCI controllers that
	 * connect to an eMMC device. Some of those host controllers
	 * still need 1.8V vccq to support DDR mode.
	 *
	 * So the sequence will be:
	 * if (host and device can both support 1.2v IO)
	 *	use 1.2v IO;
	 * else if (host and device can both support 1.8v IO)
	 *	use 1.8v IO;
	 * so if host and device can only support 3.3v IO, this is the
	 * last choice.
	 *
	 * WARNING: eMMC rules are NOT the same as SD DDR
	 */
	err = -EINVAL;
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* make sure vccq is 3.3v after switching disaster */
	if (err)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	if (!err) {
		mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
		err = mmc_switch_status(card, false);
	}

	return err;
}

Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001223static int mmc_select_hs400(struct mmc_card *card)
1224{
1225 struct mmc_host *host = card->host;
Adrian Hunter51b12f72015-10-28 14:25:41 +02001226 unsigned int max_dtr;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001227 int err = 0;
Adrian Huntercc4f4142015-02-06 14:12:58 +02001228 u8 val;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001229
1230 /*
1231 * HS400 mode requires 8-bit bus width
1232 */
Yi Sun041a3812015-06-10 16:40:06 -07001233 if (card->ext_csd.strobe_support) {
1234 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1235 host->caps & MMC_CAP_8_BIT_DATA))
1236 return 0;
1237
 1238		/* For the Enhanced Strobe flow. For non-Enhanced-Strobe, the
 1239		 * signal voltage will not be set here.
1240 */
1241 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1242 err = __mmc_set_signal_voltage(host,
1243 MMC_SIGNAL_VOLTAGE_120);
1244
1245 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1246 err = __mmc_set_signal_voltage(host,
1247 MMC_SIGNAL_VOLTAGE_180);
1248 if (err)
1249 return err;
1250 } else {
1251 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1252 host->ios.bus_width == MMC_BUS_WIDTH_8))
1253 return 0;
1254 }
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001255
Adrian Hunter51b12f72015-10-28 14:25:41 +02001256 /* Switch card to HS mode */
Wenkai Duadb24d42015-11-26 14:00:44 +02001257 val = EXT_CSD_TIMING_HS;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001258 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
Adrian Huntercc4f4142015-02-06 14:12:58 +02001259 EXT_CSD_HS_TIMING, val,
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001260 card->ext_csd.generic_cmd6_time,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001261 true, false, true);
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001262 if (err) {
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05001263 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001264 mmc_hostname(host), err);
1265 return err;
1266 }
1267
Adrian Hunter51b12f72015-10-28 14:25:41 +02001268 /* Set host controller to HS timing */
1269 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1270
Ziyuan Xu649c6052016-05-26 13:50:32 +08001271 /* Reduce frequency to HS frequency */
1272 max_dtr = card->ext_csd.hs_max_dtr;
1273 mmc_set_clock(host, max_dtr);
1274
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301275 err = mmc_switch_status(card, false);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001276 if (err)
1277 goto out_err;
Adrian Hunterd2302932015-10-28 14:25:43 +02001278
Yi Sun041a3812015-06-10 16:40:06 -07001279 val = EXT_CSD_DDR_BUS_WIDTH_8;
Venkat Gopalakrishnanffca5772015-06-11 18:27:27 -07001280 if (card->ext_csd.strobe_support) {
1281 err = mmc_select_bus_width(card);
1282 if (IS_ERR_VALUE((unsigned long)err))
1283 return err;
Yi Sun041a3812015-06-10 16:40:06 -07001284 val |= EXT_CSD_BUS_WIDTH_STROBE;
Venkat Gopalakrishnanffca5772015-06-11 18:27:27 -07001285 }
Yi Sun041a3812015-06-10 16:40:06 -07001286
Adrian Hunterd2302932015-10-28 14:25:43 +02001287 /* Switch card to DDR */
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001288 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1289 EXT_CSD_BUS_WIDTH,
Yi Sun041a3812015-06-10 16:40:06 -07001290 val,
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001291 card->ext_csd.generic_cmd6_time);
1292 if (err) {
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05001293 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001294 mmc_hostname(host), err);
1295 return err;
1296 }
1297
Adrian Hunterd2302932015-10-28 14:25:43 +02001298 /* Switch card to HS400 */
Adrian Huntercc4f4142015-02-06 14:12:58 +02001299 val = EXT_CSD_TIMING_HS400 |
1300 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001301 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
Adrian Huntercc4f4142015-02-06 14:12:58 +02001302 EXT_CSD_HS_TIMING, val,
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001303 card->ext_csd.generic_cmd6_time,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001304 true, false, true);
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001305 if (err) {
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05001306 pr_err("%s: switch to hs400 failed, err:%d\n",
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001307 mmc_hostname(host), err);
1308 return err;
1309 }
1310
Adrian Hunterd2302932015-10-28 14:25:43 +02001311 /* Set host controller to HS400 timing and frequency */
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001312 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1313 mmc_set_bus_speed(card);
1314
Ritesh Harjani06b9b782015-08-25 11:30:48 +05301315 if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
Venkat Gopalakrishnanf3acb6b2015-06-10 17:20:00 -07001316 mmc_host_clk_hold(host);
1317 err = host->ops->enhanced_strobe(host);
1318 mmc_host_clk_release(host);
1319 } else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
1320 host->ops->execute_tuning) {
Venkat Gopalakrishnanf6c98452015-01-09 15:11:57 -08001321 mmc_host_clk_hold(host);
1322 err = host->ops->execute_tuning(host,
1323 MMC_SEND_TUNING_BLOCK_HS200);
1324 mmc_host_clk_release(host);
1325
1326 if (err)
1327 pr_warn("%s: tuning execution failed\n",
1328 mmc_hostname(host));
1329 }
1330
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301331 /*
 1332	 * CMD13 should only be sent after the host calibration for
 1333	 * enhanced_strobe or HS400 mode has completed.
 1334	 * Otherwise we may see CMD13 timeouts or CRC errors.
1335 */
1336 err = mmc_switch_status(card, false);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001337 if (err)
1338 goto out_err;
Adrian Hunterd2302932015-10-28 14:25:43 +02001339
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001340 return 0;
Adrian Hunterd2302932015-10-28 14:25:43 +02001341
1342out_err:
1343 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1344 __func__, err);
1345 return err;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001346}
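
/*
 * Illustrative sketch, not used by this driver: how the mmc_switch() and
 * __mmc_switch() calls above are encoded into the raw CMD6 (SWITCH) argument.
 * The helper name example_cmd6_arg() is hypothetical; the bit layout
 * (access mode [25:24], EXT_CSD index [23:16], value [15:8], cmd set [2:0])
 * follows the eMMC SWITCH command definition.
 */
#if 0	/* example only, never compiled */
static u32 example_cmd6_arg(u8 index, u8 value, u8 cmd_set)
{
	/* write-byte access to one EXT_CSD register */
	return (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) |
	       (value << 8) | cmd_set;
}
/*
 * e.g. the HS400 timing switch above roughly corresponds to:
 *	example_cmd6_arg(EXT_CSD_HS_TIMING,
 *			 EXT_CSD_TIMING_HS400 |
 *			 card->drive_strength << EXT_CSD_DRV_STR_SHIFT,
 *			 EXT_CSD_CMD_SET_NORMAL);
 */
#endif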
1347
Adrian Hunter6376f692015-05-07 13:10:20 +03001348int mmc_hs200_to_hs400(struct mmc_card *card)
1349{
1350 return mmc_select_hs400(card);
1351}
1352
Adrian Hunter6376f692015-05-07 13:10:20 +03001353int mmc_hs400_to_hs200(struct mmc_card *card)
1354{
1355 struct mmc_host *host = card->host;
Adrian Hunter6376f692015-05-07 13:10:20 +03001356 unsigned int max_dtr;
1357 int err;
Adrian Huntercc4f4142015-02-06 14:12:58 +02001358 u8 val;
Adrian Hunter6376f692015-05-07 13:10:20 +03001359
Adrian Hunter6376f692015-05-07 13:10:20 +03001360 /* Switch HS400 to HS DDR */
Wenkai Duadb24d42015-11-26 14:00:44 +02001361 val = EXT_CSD_TIMING_HS;
Adrian Hunter6376f692015-05-07 13:10:20 +03001362 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
Adrian Huntercc4f4142015-02-06 14:12:58 +02001363 val, card->ext_csd.generic_cmd6_time,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001364 true, false, true);
Adrian Hunter6376f692015-05-07 13:10:20 +03001365 if (err)
1366 goto out_err;
1367
1368 mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1369
Sayali Lokhandee73fdff2017-07-05 11:21:28 +05301370 /* Reduce frequency to HS */
1371 max_dtr = card->ext_csd.hs_max_dtr;
1372 mmc_set_clock(host, max_dtr);
1373
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301374 err = mmc_switch_status(card, false);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001375 if (err)
1376 goto out_err;
Adrian Hunter6376f692015-05-07 13:10:20 +03001377
1378 /* Switch HS DDR to HS */
1379 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1380 EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001381 true, false, true);
Adrian Hunter6376f692015-05-07 13:10:20 +03001382 if (err)
1383 goto out_err;
1384
1385 mmc_set_timing(host, MMC_TIMING_MMC_HS);
1386
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301387 err = mmc_switch_status(card, false);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001388 if (err)
1389 goto out_err;
Adrian Hunter6376f692015-05-07 13:10:20 +03001390
1391 /* Switch HS to HS200 */
Adrian Huntercc4f4142015-02-06 14:12:58 +02001392 val = EXT_CSD_TIMING_HS200 |
1393 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
Adrian Hunter6376f692015-05-07 13:10:20 +03001394 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001395 val, card->ext_csd.generic_cmd6_time,
1396 true, false, true);
Adrian Hunter6376f692015-05-07 13:10:20 +03001397 if (err)
1398 goto out_err;
1399
1400 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1401
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301402 err = mmc_switch_status(card, false);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001403 if (err)
1404 goto out_err;
Adrian Hunter6376f692015-05-07 13:10:20 +03001405
1406 mmc_set_bus_speed(card);
1407
1408 return 0;
1409
1410out_err:
1411 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1412 __func__, err);
1413 return err;
1414}
1415
Shawn Lin81ac2af2016-05-26 09:56:22 +08001416static int mmc_select_hs400es(struct mmc_card *card)
1417{
1418 struct mmc_host *host = card->host;
Haibo Chen0a205d82017-08-08 18:54:01 +08001419 int err = -EINVAL;
Shawn Lin81ac2af2016-05-26 09:56:22 +08001420 u8 val;
1421
1422 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
1423 err = -ENOTSUPP;
1424 goto out_err;
1425 }
1426
Shawn Lin1720d352016-09-30 14:18:58 +08001427 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
1428 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1429
1430 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
1431 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1432
 1433	/* If this fails, try again during the next card power cycle */
1434 if (err)
1435 goto out_err;
1436
Shawn Lin81ac2af2016-05-26 09:56:22 +08001437 err = mmc_select_bus_width(card);
1438 if (err < 0)
1439 goto out_err;
1440
1441 /* Switch card to HS mode */
1442 err = mmc_select_hs(card);
Jungseung Lee67d35962016-08-24 19:34:09 +09001443 if (err)
Shawn Lin81ac2af2016-05-26 09:56:22 +08001444 goto out_err;
Shawn Lin81ac2af2016-05-26 09:56:22 +08001445
Shawn Lin4f255802016-09-30 14:18:59 +08001446 mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1447
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301448 err = mmc_switch_status(card, false);
Shawn Lin81ac2af2016-05-26 09:56:22 +08001449 if (err)
1450 goto out_err;
1451
1452 /* Switch card to DDR with strobe bit */
1453 val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1454 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1455 EXT_CSD_BUS_WIDTH,
1456 val,
1457 card->ext_csd.generic_cmd6_time);
1458 if (err) {
1459 pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
1460 mmc_hostname(host), err);
1461 goto out_err;
1462 }
1463
1464 /* Switch card to HS400 */
1465 val = EXT_CSD_TIMING_HS400 |
1466 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1467 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1468 EXT_CSD_HS_TIMING, val,
1469 card->ext_csd.generic_cmd6_time,
1470 true, false, true);
1471 if (err) {
1472 pr_err("%s: switch to hs400es failed, err:%d\n",
1473 mmc_hostname(host), err);
1474 goto out_err;
1475 }
1476
1477 /* Set host controller to HS400 timing and frequency */
1478 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1479
 1480	/* Have the controller enable its enhanced strobe function */
1481 host->ios.enhanced_strobe = true;
1482 if (host->ops->hs400_enhanced_strobe)
1483 host->ops->hs400_enhanced_strobe(host, &host->ios);
1484
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301485 err = mmc_switch_status(card, false);
Shawn Lin81ac2af2016-05-26 09:56:22 +08001486 if (err)
1487 goto out_err;
1488
1489 return 0;
1490
1491out_err:
1492 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1493 __func__, err);
1494 return err;
1495}
1496
Adrian Huntercc4f4142015-02-06 14:12:58 +02001497static void mmc_select_driver_type(struct mmc_card *card)
1498{
1499 int card_drv_type, drive_strength, drv_type;
1500
1501 card_drv_type = card->ext_csd.raw_driver_strength |
1502 mmc_driver_type_mask(0);
1503
1504 drive_strength = mmc_select_drive_strength(card,
1505 card->ext_csd.hs200_max_dtr,
1506 card_drv_type, &drv_type);
1507
1508 card->drive_strength = drive_strength;
1509
1510 if (drv_type)
1511 mmc_set_driver_type(card->host, drv_type);
1512}
1513
Seungwon Jeon577fb132014-04-23 17:08:44 +09001514/*
 1515 * For devices supporting HS200 mode, the following sequence
 1516 * must be performed before executing the tuning process:
 1517 * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
 1518 * 2. switch to HS200 mode
 1519 * 3. set the clock to > 52 MHz and <= 200 MHz
1520 */
1521static int mmc_select_hs200(struct mmc_card *card)
1522{
1523 struct mmc_host *host = card->host;
Dong Aishenge51534c2016-04-21 00:51:30 +08001524 unsigned int old_timing, old_signal_voltage;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001525 int err = -EINVAL;
Adrian Huntercc4f4142015-02-06 14:12:58 +02001526 u8 val;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001527
Dong Aishenge51534c2016-04-21 00:51:30 +08001528 old_signal_voltage = host->ios.signal_voltage;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001529 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1530 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1531
1532 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1533 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1534
 1535	/* If this fails, try again during the next card power cycle */
1536 if (err)
Dong Aishenge51534c2016-04-21 00:51:30 +08001537 return err;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001538
Adrian Huntercc4f4142015-02-06 14:12:58 +02001539 mmc_select_driver_type(card);
1540
Seungwon Jeon577fb132014-04-23 17:08:44 +09001541 /*
 1542	 * Set the bus width (4 or 8) with the host's support and
 1543	 * switch to HS200 mode if the bus width is set successfully.
1544 */
1545 err = mmc_select_bus_width(card);
Peter Griffin8b7be8f2016-06-08 11:27:52 +01001546 if (err > 0) {
Adrian Huntercc4f4142015-02-06 14:12:58 +02001547 val = EXT_CSD_TIMING_HS200 |
1548 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
Ulf Hansson4509f84772014-01-08 16:09:33 +01001549 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
Adrian Huntercc4f4142015-02-06 14:12:58 +02001550 EXT_CSD_HS_TIMING, val,
Seungwon Jeon577fb132014-04-23 17:08:44 +09001551 card->ext_csd.generic_cmd6_time,
Chaotian Jing08573ea2016-05-19 16:47:41 +08001552 true, false, true);
Adrian Hunter1815e612015-10-28 14:25:40 +02001553 if (err)
1554 goto err;
1555 old_timing = host->ios.timing;
1556 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001557
Ritesh Harjania0b42c02016-12-01 10:12:22 +05301558 /*
 1559		 * After switching to HS200, CRC errors might occur
 1560		 * for commands sent before tuning completes, so
 1561		 * ignore CRC errors for CMD13.
1562 */
1563 err = mmc_switch_status(card, true);
Chaotian Jing08573ea2016-05-19 16:47:41 +08001564 /*
1565 * mmc_select_timing() assumes timing has not changed if
1566 * it is a switch error.
1567 */
1568 if (err == -EBADMSG)
1569 mmc_set_timing(host, old_timing);
Seungwon Jeon577fb132014-04-23 17:08:44 +09001570 }
Girish K Sa4924c72012-01-11 14:04:52 -05001571err:
Dong Aishenge51534c2016-04-21 00:51:30 +08001572 if (err) {
 1573		/* fall back to the old signal voltage; if that fails, report an error */
1574 if (__mmc_set_signal_voltage(host, old_signal_voltage))
1575 err = -EIO;
1576
Adrian Hunter1815e612015-10-28 14:25:40 +02001577 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1578 __func__, err);
Dong Aishenge51534c2016-04-21 00:51:30 +08001579 }
Girish K Sa4924c72012-01-11 14:04:52 -05001580 return err;
1581}
1582
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02001583static int mmc_reboot_notify(struct notifier_block *notify_block,
1584 unsigned long event, void *unused)
1585{
1586 struct mmc_card *card = container_of(
1587 notify_block, struct mmc_card, reboot_notify);
1588
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07001589 card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02001590
1591 return NOTIFY_OK;
1592}
1593
Girish K Sa4924c72012-01-11 14:04:52 -05001594/*
Shawn Lin81ac2af2016-05-26 09:56:22 +08001595 * Activate High Speed, HS200 or HS400ES mode if supported.
Seungwon Jeon577fb132014-04-23 17:08:44 +09001596 */
1597static int mmc_select_timing(struct mmc_card *card)
1598{
1599 int err = 0;
1600
Ulf Hansson148bcab2014-10-20 11:33:53 +02001601 if (!mmc_can_ext_csd(card))
Seungwon Jeon577fb132014-04-23 17:08:44 +09001602 goto bus_speed;
1603
Yi Sun041a3812015-06-10 16:40:06 -07001604	/* For the Enhanced Strobe HS400 flow */
1605 if (card->ext_csd.strobe_support &&
1606 card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1607 card->host->caps & MMC_CAP_8_BIT_DATA)
Shawn Lin81ac2af2016-05-26 09:56:22 +08001608 err = mmc_select_hs400es(card);
1609 else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
Seungwon Jeon577fb132014-04-23 17:08:44 +09001610 err = mmc_select_hs200(card);
1611 else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1612 err = mmc_select_hs(card);
1613
1614 if (err && err != -EBADMSG)
1615 return err;
1616
Seungwon Jeon577fb132014-04-23 17:08:44 +09001617bus_speed:
1618 /*
1619 * Set the bus speed to the selected bus timing.
 1620	 * If no timing was selected, backward-compatible (legacy) timing is the default.
1621 */
1622 mmc_set_bus_speed(card);
Dong Aisheng0400ed02016-04-21 00:51:31 +08001623 return 0;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001624}
1625
1626/*
1627 * Execute tuning sequence to seek the proper bus operating
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001628 * conditions for HS200 and HS400, which sends CMD21 to the device.
Seungwon Jeon577fb132014-04-23 17:08:44 +09001629 */
1630static int mmc_hs200_tuning(struct mmc_card *card)
1631{
1632 struct mmc_host *host = card->host;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001633
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001634 /*
1635 * Timing should be adjusted to the HS400 target
 1636	 * operation frequency for the tuning process
1637 */
1638 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1639 host->ios.bus_width == MMC_BUS_WIDTH_8)
Venkat Gopalakrishnane73eb3f2015-01-09 15:09:11 -08001640 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09001641
Adrian Hunter63e415c2014-12-05 19:40:59 +02001642 return mmc_execute_tuning(card);
Seungwon Jeon577fb132014-04-23 17:08:44 +09001643}
1644
Asutosh Das4f458922015-05-21 13:43:24 +05301645static int mmc_select_cmdq(struct mmc_card *card)
1646{
1647 struct mmc_host *host = card->host;
1648 int ret = 0;
1649
1650 if (!host->cmdq_ops) {
1651 pr_err("%s: host controller doesn't support CMDQ\n",
1652 mmc_hostname(host));
1653 return 0;
1654 }
1655
1656 ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
1657 if (ret)
1658 goto out;
1659
1660 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 1,
1661 card->ext_csd.generic_cmd6_time);
1662 if (ret)
1663 goto out;
1664
1665 mmc_card_set_cmdq(card);
Talel Shenharf89b1182015-06-23 13:15:54 +03001666 mmc_host_clk_hold(card->host);
Asutosh Das4f458922015-05-21 13:43:24 +05301667 ret = host->cmdq_ops->enable(card->host);
1668 if (ret) {
Talel Shenharf89b1182015-06-23 13:15:54 +03001669 mmc_host_clk_release(card->host);
Asutosh Das4f458922015-05-21 13:43:24 +05301670 pr_err("%s: failed (%d) enabling CMDQ on host\n",
1671 mmc_hostname(host), ret);
1672 mmc_card_clr_cmdq(card);
1673 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 0,
1674 card->ext_csd.generic_cmd6_time);
Subhash Jadavanie88f5e22015-10-20 18:01:53 -07001675 goto out;
Asutosh Das4f458922015-05-21 13:43:24 +05301676 }
1677
Talel Shenharf89b1182015-06-23 13:15:54 +03001678 mmc_host_clk_release(card->host);
Maya Erezbe076d42015-09-29 17:34:03 +03001679 pr_info_once("%s: CMDQ enabled on card\n", mmc_hostname(host));
Asutosh Das4f458922015-05-21 13:43:24 +05301680out:
1681 return ret;
1682}
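
/*
 * Design note: if enabling CMDQ on the host controller fails above, the code
 * rolls back the EXT_CSD_CMDQ switch on the card as well, so the card and the
 * host never end up disagreeing about whether command queuing is active.
 */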
1683
Asutosh Das0fcc35f2015-12-09 10:48:18 +05301684static int mmc_select_hs_ddr52(struct mmc_host *host)
1685{
1686 int err;
1687
1688 mmc_select_hs(host->card);
Asutosh Das0fcc35f2015-12-09 10:48:18 +05301689 err = mmc_select_bus_width(host->card);
1690 if (err < 0) {
1691 pr_err("%s: %s: select_bus_width failed(%d)\n",
1692 mmc_hostname(host), __func__, err);
1693 return err;
1694 }
1695
1696 err = mmc_select_hs_ddr(host->card);
Asutosh Dasc364cee2016-02-25 17:30:38 +05301697 mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);
Asutosh Das0fcc35f2015-12-09 10:48:18 +05301698
1699 return err;
1700}
1701
Seungwon Jeon577fb132014-04-23 17:08:44 +09001702/*
Talel Shenhara52f84e2015-05-27 14:20:34 +03001703 * Scale down from HS400 to HS in order to allow frequency change.
 1704 * This is needed for cards that don't support changing the frequency in HS400.
1705 */
1706static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
1707{
1708 int err = 0;
1709
1710 mmc_set_timing(host, MMC_TIMING_LEGACY);
1711 mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
1712
Asutosh Das0fcc35f2015-12-09 10:48:18 +05301713 if (host->clk_scaling.lower_bus_speed_mode &
1714 MMC_SCALING_LOWER_DDR52_MODE) {
1715 err = mmc_select_hs_ddr52(host);
1716 if (err)
1717 pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
1718 mmc_hostname(host), __func__, err);
1719 else
1720 return err;
1721 }
1722
Talel Shenhara52f84e2015-05-27 14:20:34 +03001723 err = mmc_select_hs(host->card);
1724 if (err) {
Asutosh Das0fcc35f2015-12-09 10:48:18 +05301725 pr_err("%s: %s: scaling low: failed (%d)\n",
1726 mmc_hostname(host), __func__, err);
Talel Shenhara52f84e2015-05-27 14:20:34 +03001727 return err;
1728 }
1729
1730 err = mmc_select_bus_width(host->card);
1731 if (err < 0) {
1732 pr_err("%s: %s: select_bus_width failed(%d)\n",
1733 mmc_hostname(host), __func__, err);
1734 return err;
1735 }
1736
1737 mmc_set_clock(host, freq);
1738
1739 return 0;
1740}
1741
1742/*
 1743 * Scale up from HS to HS200/HS400
1744 */
1745static int mmc_scale_high(struct mmc_host *host)
1746{
1747 int err = 0;
1748
Asutosh Dasc364cee2016-02-25 17:30:38 +05301749 if (mmc_card_ddr52(host->card)) {
1750 mmc_set_timing(host, MMC_TIMING_LEGACY);
1751 mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
1752 }
1753
Venkat Gopalakrishnandf07bec2015-06-15 10:12:53 -07001754 if (!host->card->ext_csd.strobe_support) {
1755 if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
1756 pr_err("%s: %s: card does not support HS200\n",
1757 mmc_hostname(host), __func__);
1758 WARN_ON(1);
1759 return -EPERM;
1760 }
Talel Shenhara52f84e2015-05-27 14:20:34 +03001761
Venkat Gopalakrishnandf07bec2015-06-15 10:12:53 -07001762 err = mmc_select_hs200(host->card);
1763 if (err) {
1764 pr_err("%s: %s: selecting HS200 failed (%d)\n",
1765 mmc_hostname(host), __func__, err);
1766 return err;
1767 }
Talel Shenhara52f84e2015-05-27 14:20:34 +03001768
Venkat Gopalakrishnandf07bec2015-06-15 10:12:53 -07001769 mmc_set_bus_speed(host->card);
Talel Shenhara52f84e2015-05-27 14:20:34 +03001770
Venkat Gopalakrishnandf07bec2015-06-15 10:12:53 -07001771 err = mmc_hs200_tuning(host->card);
1772 if (err) {
1773 pr_err("%s: %s: hs200 tuning failed (%d)\n",
1774 mmc_hostname(host), __func__, err);
1775 return err;
1776 }
Talel Shenhara52f84e2015-05-27 14:20:34 +03001777
Venkat Gopalakrishnandf07bec2015-06-15 10:12:53 -07001778 if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
1779 pr_debug("%s: card does not support HS400\n",
1780 mmc_hostname(host));
1781 return 0;
1782 }
Talel Shenhara52f84e2015-05-27 14:20:34 +03001783 }
1784
1785 err = mmc_select_hs400(host->card);
1786 if (err) {
1787 pr_err("%s: %s: select hs400 failed (%d)\n",
1788 mmc_hostname(host), __func__, err);
1789 return err;
1790 }
1791
Venkat Gopalakrishnanf3acb6b2015-06-10 17:20:00 -07001792 return err;
Talel Shenhara52f84e2015-05-27 14:20:34 +03001793}
1794
1795static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
1796{
1797 int err = 0;
1798
1799 if (freq == MMC_HS200_MAX_DTR)
1800 err = mmc_scale_high(card->host);
1801 else
1802 err = mmc_scale_low(card->host, freq);
1803
1804 return err;
1805}
1806
1807static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
1808{
1809 if (freq == MMC_HIGH_DDR_MAX_DTR)
1810 return freq;
1811
1812 return freq/2;
1813}
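
/*
 * Worked example (illustrative, an assumption about the intent): in DDR the
 * data rate is roughly twice the bus clock, so the helper above halves the
 * requested rate, e.g. a request of 104 MHz results in a 52 MHz bus clock,
 * while a request of exactly MMC_HIGH_DDR_MAX_DTR is passed through unchanged.
 */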
1814
1815/**
1816 * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
1817 * @host: pointer to mmc host structure
1818 * @freq: pointer to desired frequency to be set
1819 *
1820 * Change the MMC card bus frequency at runtime after the card is
1821 * initialized. Callers are expected to make sure of the card's
1822 * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
1823 *
1824 * If the frequency to change is greater than max. supported by card,
1825 * *freq is changed to max. supported by card. If it is less than min.
1826 * supported by host, *freq is changed to min. supported by host.
 1827 * The host is assumed to be claimed while calling this function.
1828 */
1829static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
1830{
1831 int err = 0;
1832 struct mmc_card *card;
1833 unsigned long actual_freq;
1834
1835 card = host->card;
1836
1837 if (!card || !freq) {
1838 err = -EINVAL;
1839 goto out;
1840 }
1841 actual_freq = *freq;
1842
1843 WARN_ON(!host->claimed);
1844
1845 /*
1846 * For scaling up/down HS400 we'll need special handling,
1847 * for other timings we can simply do clock frequency change
1848 */
1849 if (mmc_card_hs400(card) ||
Sahitya Tummalaedac91a2016-04-21 16:04:09 +05301850 (!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
Talel Shenhara52f84e2015-05-27 14:20:34 +03001851 err = mmc_set_clock_bus_speed(card, *freq);
1852 if (err) {
1853 pr_err("%s: %s: failed (%d)to set bus and clock speed (freq=%lu)\n",
1854 mmc_hostname(host), __func__, err, *freq);
1855 goto out;
1856 }
1857 } else if (mmc_card_hs200(host->card)) {
1858 mmc_set_clock(host, *freq);
1859 err = mmc_hs200_tuning(host->card);
1860 if (err) {
1861 pr_warn("%s: %s: tuning execution failed %d\n",
1862 mmc_hostname(card->host),
1863 __func__, err);
1864 mmc_set_clock(host, host->clk_scaling.curr_freq);
1865 }
1866 } else {
1867 if (mmc_card_ddr52(host->card))
1868 actual_freq = mmc_ddr_freq_accommodation(*freq);
1869 mmc_set_clock(host, actual_freq);
1870 }
1871
1872out:
1873 return err;
1874}
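
/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * clock-scaling caller is expected to hold the host claim around the call,
 * since mmc_change_bus_speed() checks this with WARN_ON(!host->claimed).
 * The helper name example_scale_to_hs200() is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_scale_to_hs200(struct mmc_host *host)
{
	unsigned long freq = MMC_HS200_MAX_DTR;
	int err;

	mmc_claim_host(host);
	err = mmc_change_bus_speed(host, &freq);
	mmc_release_host(host);

	return err;
}
#endif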
1875
1876/*
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001877 * Handle the detection and initialisation of a card.
1878 *
Deepak Saxena87693922008-06-16 19:20:57 -07001879 * In the case of a resume, "oldcard" will contain the card
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001880 * we're trying to reinitialise.
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001881 */
Pierre Ossman8c75dea2007-05-19 16:14:43 +02001882static int mmc_init_card(struct mmc_host *host, u32 ocr,
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001883 struct mmc_card *oldcard)
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001884{
1885 struct mmc_card *card;
Seungwon Jeon577fb132014-04-23 17:08:44 +09001886 int err;
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001887 u32 cid[4];
Philip Rakityb676f032011-02-13 23:13:09 -08001888 u32 rocr;
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001889
1890 BUG_ON(!host);
Pierre Ossmand84075c82007-08-09 13:23:56 +02001891 WARN_ON(!host->claimed);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001892
Stefan Nilsson XK44669032011-09-15 17:50:38 +02001893 /* Set correct bus mode for MMC before attempting init */
1894 if (!mmc_host_is_spi(host))
1895 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1896
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001897 /*
1898 * Since we're changing the OCR value, we seem to
1899 * need to tell some cards to go back to the idle
1900 * state. We wait 1ms to give cards time to
1901 * respond.
Balaji T Kc3805462011-09-08 22:08:39 +05301902	 * mmc_go_idle is needed for eMMC devices that are asleep
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001903 */
Asutosh Das4f458922015-05-21 13:43:24 +05301904reinit:
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001905 mmc_go_idle(host);
1906
1907 /* The extra bit indicates that we support high capacity */
Philip Rakityb676f032011-02-13 23:13:09 -08001908 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001909 if (err) {
1910 pr_err("%s: %s: mmc_send_op_cond() fails %d\n",
1911 mmc_hostname(host), __func__, err);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001912 goto err;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001913 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001914
1915 /*
David Brownellaf517152007-08-08 09:11:32 -07001916 * For SPI, enable CRC as appropriate.
1917 */
1918 if (mmc_host_is_spi(host)) {
1919 err = mmc_spi_set_crc(host, use_spi_crc);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001920 if (err) {
1921 pr_err("%s: %s: mmc_spi_set_crc() fails %d\n",
1922 mmc_hostname(host), __func__, err);
David Brownellaf517152007-08-08 09:11:32 -07001923 goto err;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001924 }
David Brownellaf517152007-08-08 09:11:32 -07001925 }
1926
1927 /*
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001928 * Fetch CID from card.
1929 */
David Brownellaf517152007-08-08 09:11:32 -07001930 if (mmc_host_is_spi(host))
1931 err = mmc_send_cid(host, cid);
1932 else
1933 err = mmc_all_send_cid(host, cid);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001934 if (err) {
1935 pr_err("%s: %s: mmc_send_cid() fails %d\n",
1936 mmc_hostname(host), __func__, err);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001937 goto err;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001938 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001939
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001940 if (oldcard) {
Pierre Ossmanadf66a02007-07-22 23:08:30 +02001941 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1942 err = -ENOENT;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001943 pr_err("%s: %s: CID memcmp failed %d\n",
1944 mmc_hostname(host), __func__, err);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001945 goto err;
Pierre Ossmanadf66a02007-07-22 23:08:30 +02001946 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001947
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001948 card = oldcard;
1949 } else {
1950 /*
1951 * Allocate card structure.
1952 */
Pierre Ossman51ec92e2008-03-21 23:54:50 +01001953 card = mmc_alloc_card(host, &mmc_type);
Pierre Ossmanadf66a02007-07-22 23:08:30 +02001954 if (IS_ERR(card)) {
1955 err = PTR_ERR(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001956 pr_err("%s: %s: no memory to allocate for card %d\n",
1957 mmc_hostname(host), __func__, err);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001958 goto err;
Pierre Ossmanadf66a02007-07-22 23:08:30 +02001959 }
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001960
Ulf Hansson69041152013-09-13 11:31:33 +02001961 card->ocr = ocr;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001962 card->type = MMC_TYPE_MMC;
1963 card->rca = 1;
1964 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
Krishna Konda96e6b112013-10-28 15:25:03 -07001965 host->card = card;
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02001966 card->reboot_notify.notifier_call = mmc_reboot_notify;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001967 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001968
1969 /*
Doug Andersoneac86322014-12-02 15:42:45 -08001970 * Call the optional HC's init_card function to handle quirks.
1971 */
1972 if (host->ops->init_card)
1973 host->ops->init_card(host, card);
1974
1975 /*
David Brownellaf517152007-08-08 09:11:32 -07001976 * For native busses: set card RCA and quit open drain mode.
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001977 */
David Brownellaf517152007-08-08 09:11:32 -07001978 if (!mmc_host_is_spi(host)) {
1979 err = mmc_set_relative_addr(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001980 if (err) {
1981 pr_err("%s: %s: mmc_set_relative_addr() fails %d\n",
1982 mmc_hostname(host), __func__, err);
David Brownellaf517152007-08-08 09:11:32 -07001983 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001984 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001985
David Brownellaf517152007-08-08 09:11:32 -07001986 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1987 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001988
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001989 if (!oldcard) {
1990 /*
1991 * Fetch CSD from card.
1992 */
1993 err = mmc_send_csd(card, card->raw_csd);
Sahitya Tummalaace06842014-12-09 23:23:25 +02001994 if (err) {
1995 pr_err("%s: %s: mmc_send_csd() fails %d\n",
1996 mmc_hostname(host), __func__, err);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02001997 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02001998 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01001999
Pierre Ossmanbd766312007-05-01 16:11:57 +02002000 err = mmc_decode_csd(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002001 if (err) {
2002 pr_err("%s: %s: mmc_decode_csd() fails %d\n",
2003 mmc_hostname(host), __func__, err);
Pierre Ossmanbd766312007-05-01 16:11:57 +02002004 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002005 }
Pierre Ossmanbd766312007-05-01 16:11:57 +02002006 err = mmc_decode_cid(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002007 if (err) {
2008 pr_err("%s: %s: mmc_decode_cid() fails %d\n",
2009 mmc_hostname(host), __func__, err);
Pierre Ossmanbd766312007-05-01 16:11:57 +02002010 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002011 }
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002012 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002013
2014 /*
Sascha Hauer3d705d12014-08-19 10:45:51 +02002015 * handling only for cards supporting DSR and hosts requesting
2016 * DSR configuration
2017 */
2018 if (card->csd.dsr_imp && host->dsr_req)
2019 mmc_set_dsr(host);
2020
2021 /*
Pierre Ossman89a73cf2007-05-01 15:08:30 +02002022 * Select card, as all following commands rely on that.
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002023 */
David Brownellaf517152007-08-08 09:11:32 -07002024 if (!mmc_host_is_spi(host)) {
2025 err = mmc_select_card(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002026 if (err) {
2027 pr_err("%s: %s: mmc_select_card() fails %d\n",
2028 mmc_hostname(host), __func__, err);
David Brownellaf517152007-08-08 09:11:32 -07002029 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002030 }
David Brownellaf517152007-08-08 09:11:32 -07002031 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002032
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002033 if (!oldcard) {
Ulf Hansson076ec382014-10-20 13:37:24 +02002034 /* Read extended CSD. */
2035 err = mmc_read_ext_csd(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002036 if (err) {
2037 pr_err("%s: %s: mmc_read_ext_csd() fails %d\n",
2038 mmc_hostname(host), __func__, err);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002039 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002040 }
Philip Rakityb676f032011-02-13 23:13:09 -08002041
Masahiro Yamada87e88652016-04-15 20:16:12 +09002042 /*
2043 * If doing byte addressing, check if required to do sector
Philip Rakityb676f032011-02-13 23:13:09 -08002044 * addressing. Handle the case of <2GB cards needing sector
 2045		 * addressing. See section 8.1 of JEDEC Standard JESD84-A441;
2046 * ocr register has bit 30 set for sector addressing.
2047 */
Masahiro Yamada87e88652016-04-15 20:16:12 +09002048 if (rocr & BIT(30))
Philip Rakityb676f032011-02-13 23:13:09 -08002049 mmc_card_set_blockaddr(card);
2050
Adrian Hunterdfe86cb2010-08-11 14:17:46 -07002051 /* Erase size depends on CSD and Extended CSD */
2052 mmc_set_erase_size(card);
Sujith Reddy Thumma8d0a4482010-12-20 16:27:20 +05302053
2054 if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
2055 mmc_card_set_blockaddr(card);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002056 }
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002057
2058 /*
Chuanxiao Dong709de992011-01-22 04:09:41 +08002059 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002060 * bit. This bit will be lost every time after a reset or power off.
Chuanxiao Dong709de992011-01-22 04:09:41 +08002061 */
Grégory Soutadé69803d42014-09-15 17:47:09 +02002062 if (card->ext_csd.partition_setting_completed ||
Adrian Hunter83bb24a2012-03-13 05:19:13 -04002063 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
Chuanxiao Dong709de992011-01-22 04:09:41 +08002064 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
Seungwon Jeonb23cf0b2011-09-23 14:15:29 +09002065 EXT_CSD_ERASE_GROUP_DEF, 1,
2066 card->ext_csd.generic_cmd6_time);
Chuanxiao Dong709de992011-01-22 04:09:41 +08002067
Sahitya Tummalaace06842014-12-09 23:23:25 +02002068 if (err && err != -EBADMSG) {
2069 pr_err("%s: %s: mmc_switch() for ERASE_GRP_DEF fails %d\n",
2070 mmc_hostname(host), __func__, err);
Chuanxiao Dong709de992011-01-22 04:09:41 +08002071 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002072 }
Chuanxiao Dong709de992011-01-22 04:09:41 +08002073
2074 if (err) {
2075 err = 0;
2076 /*
 2077			 * Just disable the enhanced area offset & size;
 2078			 * we will try to enable ERASE_GROUP_DEF
 2079			 * again during the next reinit.
2080 */
2081 card->ext_csd.enhanced_area_offset = -EINVAL;
2082 card->ext_csd.enhanced_area_size = -EINVAL;
2083 } else {
2084 card->ext_csd.erase_group_def = 1;
2085 /*
 2086			 * ERASE_GRP_DEF was enabled successfully.
 2087			 * This affects the erase size, so we
 2088			 * need to reset the erase size here.
2089 */
2090 mmc_set_erase_size(card);
2091 }
2092 }
2093
2094 /*
Philip Rakity41e2a482011-03-19 14:10:33 -04002095 * Ensure eMMC user default partition is enabled
2096 */
Andrei Warkentin371a6892011-04-11 18:10:25 -05002097 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
2098 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
2099 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
2100 card->ext_csd.part_config,
2101 card->ext_csd.part_time);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002102 if (err && err != -EBADMSG) {
2103 pr_err("%s: %s: mmc_switch() for PART_CONFIG fails %d\n",
2104 mmc_hostname(host), __func__, err);
Andrei Warkentin371a6892011-04-11 18:10:25 -05002105 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002106 }
Oluwafemi Adeyemif952a472013-01-03 11:32:53 -08002107 card->part_curr = card->ext_csd.part_config &
2108 EXT_CSD_PART_CONFIG_ACC_MASK;
Philip Rakity41e2a482011-03-19 14:10:33 -04002109 }
2110
2111 /*
Ulf Hansson432356792013-06-10 17:03:45 +02002112 * Enable power_off_notification byte in the ext_csd register
Girish K Sbec87262011-10-13 12:04:16 +05302113 */
Ulf Hansson432356792013-06-10 17:03:45 +02002114 if (card->ext_csd.rev >= 6) {
Girish K Sbec87262011-10-13 12:04:16 +05302115 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2116 EXT_CSD_POWER_OFF_NOTIFICATION,
2117 EXT_CSD_POWER_ON,
2118 card->ext_csd.generic_cmd6_time);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002119 if (err && err != -EBADMSG) {
2120 pr_err("%s: %s: mmc_switch() for POWER_ON PON fails %d\n",
2121 mmc_hostname(host), __func__, err);
Girish K Sbec87262011-10-13 12:04:16 +05302122 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002123 }
Girish K Sbec87262011-10-13 12:04:16 +05302124
Girish K S96a85d52011-11-04 16:22:47 +05302125 /*
2126 * The err can be -EBADMSG or 0,
2127 * so check for success and update the flag
2128 */
2129 if (!err)
Ulf Hanssone6c08582012-10-05 12:45:39 -04002130 card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
Girish K S96a85d52011-11-04 16:22:47 +05302131 }
Girish K Sbec87262011-10-13 12:04:16 +05302132
2133 /*
Seungwon Jeon577fb132014-04-23 17:08:44 +09002134 * Select timing interface
Pierre Ossman89a73cf2007-05-01 15:08:30 +02002135 */
Seungwon Jeon577fb132014-04-23 17:08:44 +09002136 err = mmc_select_timing(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002137 if (err) {
2138 pr_err("%s: %s: mmc_select_timing() fails %d\n",
2139 mmc_hostname(host), __func__, err);
Seungwon Jeon577fb132014-04-23 17:08:44 +09002140 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002141 }
Girish K Sa4924c72012-01-11 14:04:52 -05002142
Girish K Sa4924c72012-01-11 14:04:52 -05002143 if (mmc_card_hs200(card)) {
Seungwon Jeon577fb132014-04-23 17:08:44 +09002144 err = mmc_hs200_tuning(card);
2145 if (err)
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05002146 goto free_card;
Seungwon Jeon0a5b6432014-04-23 17:14:58 +09002147
2148 err = mmc_select_hs400(card);
2149 if (err)
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05002150 goto free_card;
Guenter Roeck943281e2017-03-01 14:11:47 -08002151 } else if (!mmc_card_hs400es(card)) {
Seungwon Jeon577fb132014-04-23 17:08:44 +09002152 /* Select the desired bus width optionally */
2153 err = mmc_select_bus_width(card);
Anssi Hannulaef746a32017-02-13 13:46:41 +02002154 if (err > 0 && mmc_card_hs(card)) {
Seungwon Jeon577fb132014-04-23 17:08:44 +09002155 err = mmc_select_hs_ddr(card);
2156 if (err)
Andrew Gabbasov4b75bff2014-10-01 07:14:11 -05002157 goto free_card;
Adrian Hunteref0b27d2009-09-22 16:44:37 -07002158 }
Pierre Ossman89a73cf2007-05-01 15:08:30 +02002159 }
2160
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002161 card->clk_scaling_lowest = host->f_min;
Veerabhadrarao Badiganti6720af32017-09-12 15:02:01 +05302162 if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) ||
2163 (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002164 card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
Veerabhadrarao Badiganti6720af32017-09-12 15:02:01 +05302165 else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) ||
2166 (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002167 card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
2168 else
2169 card->clk_scaling_highest = card->csd.max_dtr;
2170
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002171 /*
Seungwon Jeon23850492014-04-23 17:08:05 +09002172 * Choose the power class with selected bus interface
2173 */
2174 mmc_select_powerclass(card);
2175
2176 /*
Subhash Jadavani52d09742012-03-06 17:59:12 +05302177 * Enable HPI feature (if supported)
2178 */
2179 if (card->ext_csd.hpi) {
2180 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2181 EXT_CSD_HPI_MGMT, 1,
2182 card->ext_csd.generic_cmd6_time);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002183 if (err && err != -EBADMSG) {
2184 pr_err("%s: %s: mmc_switch() for HPI_MGMT fails %d\n",
2185 mmc_hostname(host), __func__, err);
Subhash Jadavani52d09742012-03-06 17:59:12 +05302186 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002187 }
Subhash Jadavani52d09742012-03-06 17:59:12 +05302188 if (err) {
Joe Perches66061102014-09-12 14:56:56 -07002189 pr_warn("%s: Enabling HPI failed\n",
2190 mmc_hostname(card->host));
Subhash Jadavani52d09742012-03-06 17:59:12 +05302191 err = 0;
2192 } else
2193 card->ext_csd.hpi_en = 1;
2194 }
2195
2196 /*
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002197 * If cache size is higher than 0, this indicates
2198 * the existence of cache and it can be turned on.
Talel Shenhar981300a2015-02-05 14:44:15 +02002199 * If HPI is not supported then cache shouldn't be enabled.
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002200 */
Pratibhasagar V53202262016-06-09 18:09:31 -04002201 if (!mmc_card_broken_hpi(card) &&
Talel Shenharc1d30bc2015-02-04 17:59:23 +02002202 card->ext_csd.cache_size > 0) {
2203 if (card->ext_csd.hpi_en &&
2204 (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
2205 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2206 EXT_CSD_CACHE_CTRL, 1,
2207 card->ext_csd.generic_cmd6_time);
2208 if (err && err != -EBADMSG) {
2209 pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
Sahitya Tummalaace06842014-12-09 23:23:25 +02002210 mmc_hostname(host), __func__, err);
Talel Shenharc1d30bc2015-02-04 17:59:23 +02002211 goto free_card;
2212 }
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002213
Talel Shenharc1d30bc2015-02-04 17:59:23 +02002214 /*
2215 * Only if no error, cache is turned on successfully.
2216 */
2217 if (err) {
2218 pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
2219 mmc_hostname(card->host), err);
2220 card->ext_csd.cache_ctrl = 0;
2221 err = 0;
2222 } else {
2223 card->ext_csd.cache_ctrl = 1;
2224 }
Sahitya Tummala61868a42015-05-28 16:54:19 +05302225 /* enable cache barrier if supported by the device */
2226 if (card->ext_csd.cache_ctrl &&
2227 card->ext_csd.barrier_support) {
2228 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2229 EXT_CSD_BARRIER_CTRL, 1,
2230 card->ext_csd.generic_cmd6_time);
2231 if (err && err != -EBADMSG) {
2232 pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
2233 mmc_hostname(host), __func__,
2234 err);
2235 goto free_card;
2236 }
2237 if (err) {
2238 pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
2239 mmc_hostname(card->host), err);
2240 card->ext_csd.barrier_en = 0;
2241 err = 0;
2242 } else {
2243 card->ext_csd.barrier_en = 1;
2244 }
2245 }
Seungwon Jeon8bc06782011-12-09 17:47:17 +09002246 } else {
Talel Shenharc1d30bc2015-02-04 17:59:23 +02002247 /*
 2248			 * The eMMC standard doesn't specify the card's
 2249			 * default value for EXT_CSD_CACHE_CTRL, so the
 2250			 * cache may be enabled by default by some
 2251			 * card vendors.
 2252			 * Thus, it is best to explicitly disable the cache
 2253			 * when we want to avoid using it.
2254 */
2255 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2256 EXT_CSD_CACHE_CTRL, 0,
2257 card->ext_csd.generic_cmd6_time);
2258 if (err) {
2259 pr_err("%s: %s: fail on CACHE_CTRL OFF %d\n",
2260 mmc_hostname(host), __func__, err);
2261 goto free_card;
2262 }
Seungwon Jeon8bc06782011-12-09 17:47:17 +09002263 }
Seungwon Jeon881d1c22011-10-14 14:03:21 +09002264 }
Seungwon Jeonabd9ac12013-02-06 17:01:43 +09002265 /*
2266 * The mandatory minimum values are defined for packed command.
2267 * read: 5, write: 3
2268 */
2269 if (card->ext_csd.max_packed_writes >= 3 &&
2270 card->ext_csd.max_packed_reads >= 5 &&
2271 host->caps2 & MMC_CAP2_PACKED_CMD) {
2272 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2273 EXT_CSD_EXP_EVENTS_CTRL,
2274 EXT_CSD_PACKED_EVENT_EN,
2275 card->ext_csd.generic_cmd6_time);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002276 if (err && err != -EBADMSG) {
2277 pr_err("%s: %s: mmc_switch() for EXP_EVENTS_CTRL fails %d\n",
2278 mmc_hostname(host), __func__, err);
Seungwon Jeonabd9ac12013-02-06 17:01:43 +09002279 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002280 }
Seungwon Jeonabd9ac12013-02-06 17:01:43 +09002281 if (err) {
2282 pr_warn("%s: Enabling packed event failed\n",
2283 mmc_hostname(card->host));
2284 card->ext_csd.packed_event_en = 0;
2285 err = 0;
2286 } else {
2287 card->ext_csd.packed_event_en = 1;
2288 }
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002289
2290 }
2291
2292 if (!oldcard) {
2293 if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
2294 (card->ext_csd.max_packed_writes > 0)) {
2295 /*
2296 * We would like to keep the statistics in an index
 2297			 * that equals the number of packed requests
2298 * (1 to max_packed_writes)
2299 */
2300 card->wr_pack_stats.packing_events = kzalloc(
2301 (card->ext_csd.max_packed_writes + 1) *
2302 sizeof(*card->wr_pack_stats.packing_events),
2303 GFP_KERNEL);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002304 if (!card->wr_pack_stats.packing_events) {
2305 pr_err("%s: %s: no memory for packing events\n",
2306 mmc_hostname(host), __func__);
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002307 goto free_card;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002308 }
Tatyana Brokhman08238ce2012-10-07 10:33:13 +02002309 }
Seungwon Jeonabd9ac12013-02-06 17:01:43 +09002310 }
2311
Dov Levenglicka0296392015-06-24 19:51:58 +03002312 /*
2313 * Start auto bkops, if supported.
2314 *
2315 * Note: This leaves the possibility of having both manual and
2316 * auto bkops running in parallel. The runtime implementation
 2317	 * will allow this, but ignore bkops exceptions on the premise
2318 * that auto bkops will eventually kick in and the device will
2319 * handle bkops without START_BKOPS from the host.
2320 */
2321 if (mmc_card_support_auto_bkops(card)) {
2322 /*
2323 * Ignore the return value of setting auto bkops.
2324 * If it failed, will run in backward compatible mode.
2325 */
2326 (void)mmc_set_auto_bkops(card, true);
2327 }
2328
Dov Levenglickbbc49f22015-06-30 14:19:16 +03002329 if (card->ext_csd.cmdq_support && (card->host->caps2 &
2330 MMC_CAP2_CMD_QUEUE)) {
2331 err = mmc_select_cmdq(card);
2332 if (err) {
2333 pr_err("%s: selecting CMDQ mode: failed: %d\n",
2334 mmc_hostname(card->host), err);
2335 card->ext_csd.cmdq_support = 0;
2336 oldcard = card;
2337 goto reinit;
2338 }
2339 }
2340
Pierre Ossman17b04292007-07-22 22:18:46 +02002341 return 0;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002342
2343free_card:
Sahitya Tummalaace06842014-12-09 23:23:25 +02002344 if (!oldcard) {
2345 host->card = NULL;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002346 mmc_remove_card(card);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002347 }
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002348err:
Pierre Ossmanadf66a02007-07-22 23:08:30 +02002349 return err;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002350}
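
/*
 * Illustrative sketch (an assumption about the surrounding core code, not a
 * quote of it): mmc_init_card() is typically reached from the attach path
 * roughly as below, with oldcard == NULL on first init and the existing card
 * passed back in on resume. The helper name example_attach_path() is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_attach_path(struct mmc_host *host, u32 rocr)
{
	int err;

	mmc_claim_host(host);
	err = mmc_init_card(host, rocr, NULL);	/* fresh init, no oldcard */
	mmc_release_host(host);
	if (err)
		return err;

	return mmc_add_card(host->card);	/* register the new card */
}
#endif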
2351
Maya Erezbe076d42015-09-29 17:34:03 +03002352static int mmc_can_sleepawake(struct mmc_host *host)
Ulf Hansson07a68212013-04-19 15:12:11 +02002353{
Maya Erezbe076d42015-09-29 17:34:03 +03002354 return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) && host->card &&
2355 (host->card->ext_csd.rev >= 3);
Ulf Hansson07a68212013-04-19 15:12:11 +02002356}
2357
Maya Erezbe076d42015-09-29 17:34:03 +03002358static int mmc_sleepawake(struct mmc_host *host, bool sleep)
Ulf Hansson07a68212013-04-19 15:12:11 +02002359{
2360 struct mmc_command cmd = {0};
2361 struct mmc_card *card = host->card;
Sahitya Tummala47c53d92014-04-18 13:00:20 +05302362 unsigned int timeout_ms;
Ulf Hansson07a68212013-04-19 15:12:11 +02002363 int err;
2364
Sahitya Tummala47c53d92014-04-18 13:00:20 +05302365 if (!card) {
2366 pr_err("%s: %s: invalid card\n", mmc_hostname(host), __func__);
2367 return -EINVAL;
2368 }
2369
2370 timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
2371 if (card->ext_csd.rev >= 3 &&
2372 card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) {
2373 u8 part_config = card->ext_csd.part_config;
2374
2375 /*
2376 * If the last access before suspend is RPMB access, then
2377 * switch to default part config so that sleep command CMD5
2378 * and deselect CMD7 can be sent to the card.
2379 */
2380 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
2381 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2382 EXT_CSD_PART_CONFIG,
2383 part_config,
2384 card->ext_csd.part_time);
2385 if (err) {
2386 pr_err("%s: %s: failed to switch to default part config %x\n",
2387 mmc_hostname(host), __func__, part_config);
2388 return err;
2389 }
2390 card->ext_csd.part_config = part_config;
2391 card->part_curr = card->ext_csd.part_config &
2392 EXT_CSD_PART_CONFIG_ACC_MASK;
2393 }
2394
Adrian Hunter436f8da2015-05-07 13:10:18 +03002395 /* Re-tuning can't be done once the card is deselected */
2396 mmc_retune_hold(host);
2397
Maya Erezbe076d42015-09-29 17:34:03 +03002398 if (sleep) {
2399 err = mmc_deselect_cards(host);
2400 if (err)
2401 goto out_release;
2402 }
Ulf Hansson07a68212013-04-19 15:12:11 +02002403
2404 cmd.opcode = MMC_SLEEP_AWAKE;
2405 cmd.arg = card->rca << 16;
Maya Erezbe076d42015-09-29 17:34:03 +03002406 if (sleep)
2407 cmd.arg |= 1 << 15;
Ulf Hansson07a68212013-04-19 15:12:11 +02002408
Ulf Hanssoncb962e02014-01-14 23:17:36 +01002409 /*
2410 * If the max_busy_timeout of the host is specified, validate it against
2411 * the sleep cmd timeout. A failure means we need to prevent the host
2412 * from doing hw busy detection, which is done by converting to a R1
2413 * response instead of a R1B.
2414 */
2415 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
2416 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2417 } else {
2418 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
2419 cmd.busy_timeout = timeout_ms;
2420 }
2421
Ulf Hansson07a68212013-04-19 15:12:11 +02002422 err = mmc_wait_for_cmd(host, &cmd, 0);
2423 if (err)
Adrian Hunter436f8da2015-05-07 13:10:18 +03002424 goto out_release;
Ulf Hansson07a68212013-04-19 15:12:11 +02002425
2426 /*
 2427	 * If the host does not wait while the card signals busy, then we
 2428	 * will have to wait for the sleep/awake timeout. Note, we cannot use the
2429 * SEND_STATUS command to poll the status because that command (and most
2430 * others) is invalid while the card sleeps.
2431 */
Ulf Hanssoncb962e02014-01-14 23:17:36 +01002432 if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
2433 mmc_delay(timeout_ms);
Ulf Hansson07a68212013-04-19 15:12:11 +02002434
Maya Erezbe076d42015-09-29 17:34:03 +03002435 if (!sleep)
2436 err = mmc_select_card(card);
2437
Adrian Hunter436f8da2015-05-07 13:10:18 +03002438out_release:
2439 mmc_retune_release(host);
Ulf Hansson07a68212013-04-19 15:12:11 +02002440 return err;
2441}
2442
Ulf Hanssone6c08582012-10-05 12:45:39 -04002443static int mmc_can_poweroff_notify(const struct mmc_card *card)
2444{
2445 return card &&
2446 mmc_card_mmc(card) &&
2447 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
2448}
2449
2450static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
2451{
2452 unsigned int timeout = card->ext_csd.generic_cmd6_time;
2453 int err;
2454
2455 /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
2456 if (notify_type == EXT_CSD_POWER_OFF_LONG)
2457 timeout = card->ext_csd.power_off_longtime;
2458
Ulf Hansson878e2002013-09-09 11:57:57 +02002459 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2460 EXT_CSD_POWER_OFF_NOTIFICATION,
Ulf Hansson4509f84772014-01-08 16:09:33 +01002461 notify_type, timeout, true, false, false);
Ulf Hanssone6c08582012-10-05 12:45:39 -04002462 if (err)
2463 pr_err("%s: Power Off Notification timed out, %u\n",
2464 mmc_hostname(card->host), timeout);
2465
2466 /* Disable the power off notification after the switch operation. */
2467 card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
2468
2469 return err;
2470}
2471
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07002472int mmc_send_pon(struct mmc_card *card)
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02002473{
2474 int err = 0;
2475 struct mmc_host *host = card->host;
2476
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07002477 if (!mmc_can_poweroff_notify(card))
2478 goto out;
2479
Sahitya Tummalad51865b2015-11-18 10:37:56 +05302480 mmc_get_card(card);
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07002481 if (card->pon_type & MMC_LONG_PON)
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02002482 err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07002483 else if (card->pon_type & MMC_SHRT_PON)
2484 err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
2485 if (err)
 2486		pr_warn("%s: error %d sending PON type %u\n",
2487 mmc_hostname(host), err, card->pon_type);
Sahitya Tummalad51865b2015-11-18 10:37:56 +05302488 mmc_put_card(card);
Krishna Kondaa7ea2dc2015-06-29 19:20:05 -07002489out:
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02002490 return err;
2491}
2492
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002493/*
2494 * Host is being removed. Free up the current card.
2495 */
2496static void mmc_remove(struct mmc_host *host)
2497{
2498 BUG_ON(!host);
2499 BUG_ON(!host->card);
2500
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02002501 unregister_reboot_notifier(&host->card->reboot_notify);
Pratibhasagar V4e3a8b42014-04-09 12:52:46 +05302502
2503 mmc_exit_clk_scaling(host);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002504 mmc_remove_card(host->card);
Sujith Reddy Thumma8a5e1472011-04-28 18:29:34 +05302505
2506 mmc_claim_host(host);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002507 host->card = NULL;
Sujith Reddy Thumma8a5e1472011-04-28 18:29:34 +05302508 mmc_release_host(host);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002509}
2510
2511/*
Adrian Hunterd3049502011-11-28 16:22:00 +02002512 * Card detection - card is alive.
2513 */
2514static int mmc_alive(struct mmc_host *host)
2515{
2516 return mmc_send_status(host->card, NULL);
2517}
2518
2519/*
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002520 * Card detection callback from host.
2521 */
2522static void mmc_detect(struct mmc_host *host)
2523{
2524 int err;
2525
2526 BUG_ON(!host);
2527 BUG_ON(!host->card);
2528
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002529 mmc_get_card(host->card);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002530
2531 /*
2532 * Just check if our card has been removed.
2533 */
Adrian Hunterd3049502011-11-28 16:22:00 +02002534 err = _mmc_detect_card_removed(host);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002535
Ulf Hanssone94cfef2013-05-02 14:02:38 +02002536 mmc_put_card(host->card);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01002537
Pierre Ossman17b04292007-07-22 22:18:46 +02002538 if (err) {
Pierre Ossman4101c162007-05-19 13:39:01 +02002539 mmc_remove(host);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002540
2541 mmc_claim_host(host);
2542 mmc_detach_bus(host);
Ulf Hansson7f7e4122011-09-21 14:08:13 -04002543 mmc_power_off(host);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002544 mmc_release_host(host);
2545 }
2546}
2547
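/*
 * Cache the EXT_CSD fields that the driver changes at runtime (CMDQ, cache
 * control, bus width and HS timing) so they can be compared against the
 * card's values after a sleep/awake cycle.
 */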
Maya Erezbe076d42015-09-29 17:34:03 +03002548static int mmc_cache_card_ext_csd(struct mmc_host *host)
2549{
2550 int err;
2551 u8 *ext_csd;
2552 struct mmc_card *card = host->card;
2553
2554 err = mmc_get_ext_csd(card, &ext_csd);
2555 if (err || !ext_csd) {
2556 pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
2557 mmc_hostname(host), __func__, err);
2558 return err;
2559 }
2560
2561 /* only cache read/write fields that the sw changes */
2562 card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ];
2563 card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
2564 card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
2565 card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
2566
2567 kfree(ext_csd);
2568
2569 return 0;
2570}
2571
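/*
 * Re-read the EXT_CSD after waking the card and compare it with the values
 * cached at suspend time. Returns non-zero if any of the cached fields
 * differ, indicating the card lost state (e.g. due to a power failure)
 * while it was asleep.
 */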
2572static int mmc_test_awake_ext_csd(struct mmc_host *host)
2573{
2574 int err;
2575 u8 *ext_csd;
2576 struct mmc_card *card = host->card;
2577
2578 err = mmc_get_ext_csd(card, &ext_csd);
Pavan Anamulaf2dda062016-03-30 22:07:56 +05302579 if (err || !ext_csd) {
Maya Erezbe076d42015-09-29 17:34:03 +03002580 pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
2581 mmc_hostname(host), __func__, err);
2582 return err;
2583 }
2584
2585 /* only compare read/write fields that the sw changes */
2586 pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
2587 mmc_hostname(host), __func__,
2588 card->ext_csd.raw_ext_csd_cmdq,
2589 ext_csd[EXT_CSD_CMDQ],
2590 card->ext_csd.raw_ext_csd_cache_ctrl,
2591 ext_csd[EXT_CSD_CACHE_CTRL],
2592 card->ext_csd.raw_ext_csd_bus_width,
2593 ext_csd[EXT_CSD_BUS_WIDTH],
2594 card->ext_csd.raw_ext_csd_hs_timing,
2595 ext_csd[EXT_CSD_HS_TIMING]);
2596
2597 err = !((card->ext_csd.raw_ext_csd_cmdq ==
2598 ext_csd[EXT_CSD_CMDQ]) &&
2599 (card->ext_csd.raw_ext_csd_cache_ctrl ==
2600 ext_csd[EXT_CSD_CACHE_CTRL]) &&
2601 (card->ext_csd.raw_ext_csd_bus_width ==
2602 ext_csd[EXT_CSD_BUS_WIDTH]) &&
2603 (card->ext_csd.raw_ext_csd_hs_timing ==
2604 ext_csd[EXT_CSD_HS_TIMING]));
2605
2606 kfree(ext_csd);
2607
2608 return err;
2609}
2610
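/*
 * Common suspend path: suspend clock scaling, halt and disable CMDQ, stop
 * any ongoing BKOPS, flush the card's cache, and then either put the card
 * to sleep (when sleep/awake is supported) or deselect it, before powering
 * it off and marking it suspended.
 */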
Ulf Hansson03d071f2013-06-10 17:03:43 +02002611static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002612{
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302613 int err = 0, ret;
Balaji T Kc3805462011-09-08 22:08:39 +05302614
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002615 BUG_ON(!host);
2616 BUG_ON(!host->card);
2617
Talel Shenhar999102c2015-09-06 16:38:42 +03002618 err = mmc_suspend_clk_scaling(host);
2619 if (err) {
2620		pr_err("%s: %s: failed to suspend clock scaling (%d)\n",
2621 mmc_hostname(host), __func__, err);
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302622 if (host->card->cmdq_init)
2623 wake_up(&host->cmdq_ctx.wait);
Talel Shenhar999102c2015-09-06 16:38:42 +03002624 return err;
2625 }
Sujit Reddy Thummacb18d852014-12-04 09:57:23 +02002626
Talel Shenhara52f84e2015-05-27 14:20:34 +03002627 mmc_claim_host(host);
2628
2629 if (mmc_card_suspended(host->card))
2630 goto out;
2631
Sahitya Tummala508905dd2015-09-10 11:26:59 +05302632 if (host->card->cmdq_init) {
Konstantin Dorfman41834d62015-06-22 12:11:12 +03002633 BUG_ON(host->cmdq_ctx.active_reqs);
2634
2635 err = mmc_cmdq_halt(host, true);
2636 if (err) {
2637 pr_err("%s: halt: failed: %d\n", __func__, err);
2638 goto out;
2639 }
2640 mmc_host_clk_hold(host);
2641 host->cmdq_ops->disable(host, true);
2642 mmc_host_clk_release(host);
2643 }
2644
Ulf Hansson39b94312013-05-02 14:02:36 +02002645 if (mmc_card_doing_bkops(host->card)) {
2646 err = mmc_stop_bkops(host->card);
2647 if (err)
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302648 goto out_err;
Ulf Hansson39b94312013-05-02 14:02:36 +02002649 }
2650
Ulf Hansson10e5d962013-12-16 16:23:22 +01002651 err = mmc_flush_cache(host->card);
Maya Erez881d9262013-01-28 16:44:22 -05002652 if (err)
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302653 goto out_err;
Maya Erez881d9262013-01-28 16:44:22 -05002654
Maya Erezbe076d42015-09-29 17:34:03 +03002655 if (mmc_can_sleepawake(host)) {
Ritesh Harjani06c3f6a2016-04-06 13:40:01 +05302656 /*
2657		 * Before caching host->ios into cached_ios, we need to make
2658		 * sure that the clocks are not gated; otherwise
2659		 * cached_ios->clock will be 0.
2660 */
2661 mmc_host_clk_hold(host);
Maya Erezbe076d42015-09-29 17:34:03 +03002662 memcpy(&host->cached_ios, &host->ios, sizeof(host->cached_ios));
2663 mmc_cache_card_ext_csd(host);
2664 err = mmc_sleepawake(host, true);
Ritesh Harjani06c3f6a2016-04-06 13:40:01 +05302665 mmc_host_clk_release(host);
Maya Erezbe076d42015-09-29 17:34:03 +03002666 } else if (!mmc_host_is_spi(host)) {
Jaehoon Chung85e727e2012-05-31 20:31:47 +09002667 err = mmc_deselect_cards(host);
Maya Erezbe076d42015-09-29 17:34:03 +03002668 }
Nicolas Pitre95cdfb72009-09-22 16:45:29 -07002669
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302670 if (err)
2671 goto out_err;
2672 mmc_power_off(host);
2673 mmc_card_set_suspended(host->card);
2674
2675 goto out;
2676
2677out_err:
2678 /*
2679	 * In case of an error, put the controller back into CMDQ mode and
2680	 * unhalt it.
2681	 * We do not expect cmdq_enable or the unhalt to fail, since they
2682	 * only reprogram a few registers.
2683 */
2684 if (host->card->cmdq_init) {
2685 mmc_host_clk_hold(host);
2686 ret = host->cmdq_ops->enable(host);
2687 if (ret)
2688 pr_err("%s: %s: enabling CMDQ mode failed (%d)\n",
2689 mmc_hostname(host), __func__, ret);
2690 mmc_host_clk_release(host);
2691 mmc_cmdq_halt(host, false);
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002692 }
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302693
Maya Erez881d9262013-01-28 16:44:22 -05002694out:
Subhash Jadavanid667bff2015-11-10 15:13:22 -08002695	/* Kick CMDQ thread to process any requests that came in while suspending */
2696 if (host->card->cmdq_init)
2697 wake_up(&host->cmdq_ctx.wait);
2698
Maya Erez881d9262013-01-28 16:44:22 -05002699 mmc_release_host(host);
Ritesh Harjanie2f8af3d2016-12-04 21:37:41 +05302700 if (err)
2701 mmc_resume_clk_scaling(host);
Balaji T Kc3805462011-09-08 22:08:39 +05302702 return err;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002703}
2704
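/*
 * Partial (re-)initialization after a sleep/awake cycle: restore the cached
 * ios settings (bus width, timing, clock, bus mode), redo tuning or enhanced
 * strobe for HS200/HS400, verify the EXT_CSD against the cached values and
 * re-enable CMDQ if the card and host support it. A non-zero return tells
 * the caller to fall back to a full re-initialization.
 */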
Maya Erezbe076d42015-09-29 17:34:03 +03002705static int mmc_partial_init(struct mmc_host *host)
2706{
2707 int err = 0;
2708 struct mmc_card *card = host->card;
2709
2710 pr_debug("%s: %s: starting partial init\n",
2711 mmc_hostname(host), __func__);
2712
2713 mmc_set_bus_width(host, host->cached_ios.bus_width);
2714 mmc_set_timing(host, host->cached_ios.timing);
2715 mmc_set_clock(host, host->cached_ios.clock);
2716 mmc_set_bus_mode(host, host->cached_ios.bus_mode);
2717
2718 mmc_host_clk_hold(host);
2719
Sahitya Tummalac728dcb2016-04-27 16:17:44 +05302720 if (mmc_card_hs400(card)) {
Maya Erezbe076d42015-09-29 17:34:03 +03002721 if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
2722 err = host->ops->enhanced_strobe(host);
Ritesh Harjani9fba0b42017-02-20 13:50:58 +05302723 else if (host->ops->execute_tuning)
2724 err = host->ops->execute_tuning(host,
2725 MMC_SEND_TUNING_BLOCK_HS200);
Sahitya Tummalac728dcb2016-04-27 16:17:44 +05302726 } else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
2727 err = host->ops->execute_tuning(host,
2728 MMC_SEND_TUNING_BLOCK_HS200);
Maya Erezbe076d42015-09-29 17:34:03 +03002729 if (err)
2730 pr_warn("%s: %s: tuning execution failed (%d)\n",
2731 mmc_hostname(host), __func__, err);
2732 }
2733
2734 /*
2735	 * The ext_csd is read to make sure the card did not go through a
2736	 * power failure during the sleep period.
2737	 * A subset of the W/E_P, W/C_P registers is tested. If these
2738	 * register values differ from those cached during suspend, we
2739	 * conclude that a power failure occurred and fall back to the
2740	 * full initialization sequence.
2741	 * In addition, the full init sequence also transfers the ext_csd
2742	 * before moving to CMDQ mode, which has the side effect of
2743	 * configuring SDHCI registers that must be set up before entering
2744	 * CMDQ mode. The same registers need to be configured for partial init.
2745 */
2746 err = mmc_test_awake_ext_csd(host);
2747 if (err) {
2748		pr_debug("%s: %s: ext_csd read failed (%d)\n",
2749 mmc_hostname(host), __func__, err);
2750 goto out;
2751 }
2752 pr_debug("%s: %s: reading and comparing ext_csd successful\n",
2753 mmc_hostname(host), __func__);
2754
2755 if (card->ext_csd.cmdq_support && (card->host->caps2 &
2756 MMC_CAP2_CMD_QUEUE)) {
2757 err = mmc_select_cmdq(card);
2758 if (err) {
2759 pr_warn("%s: %s: enabling CMDQ mode failed (%d)\n",
2760 mmc_hostname(card->host),
2761 __func__, err);
2762 }
2763 }
2764out:
2765 mmc_host_clk_release(host);
2766
2767 pr_debug("%s: %s: done partial init (%d)\n",
2768 mmc_hostname(host), __func__, err);
2769
2770 return err;
2771}
2772
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002773/*
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002774 * Suspend callback
Ulf Hansson03d071f2013-06-10 17:03:43 +02002775 */
2776static int mmc_suspend(struct mmc_host *host)
2777{
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002778 int err;
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002779 ktime_t start = ktime_get();
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002780
Sayali Lokhande686b42a2016-12-12 08:48:37 +05302781 MMC_TRACE(host, "%s: Enter\n", __func__);
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002782 err = _mmc_suspend(host, true);
2783 if (!err) {
2784 pm_runtime_disable(&host->card->dev);
2785 pm_runtime_set_suspended(&host->card->dev);
2786 }
2787
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002788 trace_mmc_suspend(mmc_hostname(host), err,
2789 ktime_to_us(ktime_sub(ktime_get(), start)));
Sayali Lokhande686b42a2016-12-12 08:48:37 +05302790 MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002791 return err;
Ulf Hansson03d071f2013-06-10 17:03:43 +02002792}
2793
2794/*
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002795 * This function tries to determine if the same card is still present
2796 * and, if so, restore all state to it.
2797 */
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002798static int _mmc_resume(struct mmc_host *host)
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002799{
Maya Erezbe076d42015-09-29 17:34:03 +03002800 int err = -ENOSYS;
Sahitya Tummalaace06842014-12-09 23:23:25 +02002801 int retries;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002802
2803 BUG_ON(!host);
2804 BUG_ON(!host->card);
2805
2806 mmc_claim_host(host);
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002807
Sujit Reddy Thummacb18d852014-12-04 09:57:23 +02002808 if (!mmc_card_suspended(host->card)) {
2809 mmc_release_host(host);
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002810 goto out;
Sujit Reddy Thummacb18d852014-12-04 09:57:23 +02002811 }
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002812
Ulf Hansson69041152013-09-13 11:31:33 +02002813 mmc_power_up(host, host->card->ocr);
Sahitya Tummalaace06842014-12-09 23:23:25 +02002814 retries = 3;
2815 while (retries) {
Maya Erezbe076d42015-09-29 17:34:03 +03002816 if (mmc_can_sleepawake(host)) {
2817 err = mmc_sleepawake(host, false);
2818 if (!err)
2819 err = mmc_partial_init(host);
2820 if (err)
2821				pr_err("%s: %s: awake failed (%d), falling back to full init\n",
2822 mmc_hostname(host), __func__, err);
2823 }
2824
2825 if (err)
2826 err = mmc_init_card(host, host->card->ocr, host->card);
2827
Sahitya Tummalaace06842014-12-09 23:23:25 +02002828 if (err) {
2829 pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
2830 mmc_hostname(host), err, retries);
2831 retries--;
2832 mmc_power_off(host);
2833 usleep_range(5000, 5500);
2834 mmc_power_up(host, host->card->ocr);
2835 mmc_select_voltage(host, host->card->ocr);
2836 continue;
2837 }
2838 break;
2839 }
Konstantin Dorfman41834d62015-06-22 12:11:12 +03002840 if (!err && mmc_card_cmdq(host->card)) {
2841 err = mmc_cmdq_halt(host, false);
2842 if (err)
2843 pr_err("%s: un-halt: failed: %d\n", __func__, err);
2844 }
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002845 mmc_card_clr_suspended(host->card);
Pierre Ossman2986d0b2007-07-22 17:52:06 +02002846
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002847 mmc_release_host(host);
Sujit Reddy Thummacb18d852014-12-04 09:57:23 +02002848
Talel Shenhar999102c2015-09-06 16:38:42 +03002849 err = mmc_resume_clk_scaling(host);
2850 if (err)
2851		pr_err("%s: %s: failed to resume clock scaling (%d)\n",
2852 mmc_hostname(host), __func__, err);
Sujit Reddy Thummacb18d852014-12-04 09:57:23 +02002853
2854out:
Nicolas Pitre95cdfb72009-09-22 16:45:29 -07002855 return err;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02002856}
2857
Ulf Hansson9ec775f2013-10-02 17:37:09 +02002858/*
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002859 * Callback for resume.
2860 */
2861static int mmc_resume(struct mmc_host *host)
2862{
Sayali Lokhande686b42a2016-12-12 08:48:37 +05302863 int err = 0;
2864
2865 MMC_TRACE(host, "%s: Enter\n", __func__);
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002866 pm_runtime_enable(&host->card->dev);
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002867
Sayali Lokhande686b42a2016-12-12 08:48:37 +05302868 MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
Ulf Hanssonc29536e2015-11-05 16:01:32 +01002869 return 0;
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002870}
2871
Konstantin Dorfman56380b12015-12-31 14:45:48 +02002872#define MAX_DEFER_SUSPEND_COUNTER 20
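/*
 * Check the card's background-operations state before a runtime suspend.
 * Returns true if BKOPS is still needed or running, in which case the
 * suspend is deferred (up to MAX_DEFER_SUSPEND_COUNTER times) so the card
 * can finish its background work first.
 */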
2873static bool mmc_process_bkops(struct mmc_host *host)
2874{
2875 int err = 0;
2876 bool is_running = false;
2877 u32 status;
2878
2879 mmc_claim_host(host);
2880 if (mmc_card_cmdq(host->card)) {
2881 BUG_ON(host->cmdq_ctx.active_reqs);
2882
2883 err = mmc_cmdq_halt(host, true);
2884 if (err) {
2885 pr_err("%s: halt: failed: %d\n", __func__, err);
2886 goto unhalt;
2887 }
2888 }
2889
2890 if (mmc_card_doing_bkops(host->card)) {
2891 /* check that manual bkops finished */
2892 err = mmc_send_status(host->card, &status);
2893 if (err) {
2894			pr_err("%s: failed to get card status\n", __func__);
2895 goto unhalt;
2896 }
2897 if (R1_CURRENT_STATE(status) != R1_STATE_PRG) {
2898 mmc_card_clr_doing_bkops(host->card);
2899 goto unhalt;
2900 }
2901 } else {
2902 mmc_check_bkops(host->card);
2903 }
2904
2905 if (host->card->bkops.needs_bkops &&
2906 !mmc_card_support_auto_bkops(host->card))
2907 mmc_start_manual_bkops(host->card);
2908
2909unhalt:
2910 if (mmc_card_cmdq(host->card)) {
2911 err = mmc_cmdq_halt(host, false);
2912 if (err)
2913 pr_err("%s: unhalt: failed: %d\n", __func__, err);
2914 }
2915 mmc_release_host(host);
2916
2917 if (host->card->bkops.needs_bkops ||
2918 mmc_card_doing_bkops(host->card)) {
2919 if (host->card->bkops.retry_counter++ <
2920 MAX_DEFER_SUSPEND_COUNTER) {
2921 host->card->bkops.needs_check = true;
2922 is_running = true;
2923 } else {
2924 host->card->bkops.retry_counter = 0;
2925 }
2926 }
2927 return is_running;
2928}
2929
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002930/*
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002931 * Callback for runtime_suspend.
2932 */
2933static int mmc_runtime_suspend(struct mmc_host *host)
2934{
2935 int err;
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002936 ktime_t start = ktime_get();
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002937
2938 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
2939 return 0;
2940
Konstantin Dorfman56380b12015-12-31 14:45:48 +02002941 if (mmc_process_bkops(host)) {
2942 pm_runtime_mark_last_busy(&host->card->dev);
2943		pr_debug("%s: deferred, need bkops\n", __func__);
2944 return -EBUSY;
2945 }
2946
Siba Prasad0196fe42017-06-27 15:13:27 +05302947 MMC_TRACE(host, "%s\n", __func__);
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002948 err = _mmc_suspend(host, true);
Ulf Hansson0cc81a82013-10-03 11:24:44 +02002949 if (err)
Masanari Iidaf42cf8d2015-02-24 23:11:26 +09002950 pr_err("%s: error %d doing aggressive suspend\n",
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002951 mmc_hostname(host), err);
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002952
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002953 trace_mmc_runtime_suspend(mmc_hostname(host), err,
2954 ktime_to_us(ktime_sub(ktime_get(), start)));
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002955 return err;
2956}
2957
2958/*
2959 * Callback for runtime_resume.
2960 */
2961static int mmc_runtime_resume(struct mmc_host *host)
2962{
2963 int err;
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002964 ktime_t start = ktime_get();
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002965
Siba Prasad0196fe42017-06-27 15:13:27 +05302966 MMC_TRACE(host, "%s\n", __func__);
Ulf Hansson0cb403a2013-10-10 14:20:05 +02002967 err = _mmc_resume(host);
Adrian Hunter520322d2015-12-14 15:51:27 +02002968 if (err && err != -ENOMEDIUM)
Ulf Hanssonc29536e2015-11-05 16:01:32 +01002969 pr_err("%s: error %d doing runtime resume\n",
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002970 mmc_hostname(host), err);
2971
Konstantin Dorfman892f84e2015-02-12 13:37:56 +02002972 trace_mmc_runtime_resume(mmc_hostname(host), err,
2973 ktime_to_us(ktime_sub(ktime_get(), start)));
2974
Konstantin Dorfman41834d62015-06-22 12:11:12 +03002975 return err;
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02002976}
2977
Johan Rudholmf855a372015-01-12 15:38:05 +01002978int mmc_can_reset(struct mmc_card *card)
2979{
2980 u8 rst_n_function;
2981
2982 rst_n_function = card->ext_csd.rst_n_function;
2983 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2984 return 0;
2985 return 1;
2986}
2987EXPORT_SYMBOL(mmc_can_reset);
2988
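/*
 * Reset the card: flush the cache (best effort), toggle the RST_n hardware
 * line when both host and card support it (otherwise do a brute-force power
 * cycle), then re-initialize the card with clock scaling suspended around
 * the re-init.
 */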
2989static int mmc_reset(struct mmc_host *host)
2990{
2991 struct mmc_card *card = host->card;
Sayali Lokhandeb2637032017-04-24 13:40:50 +05302992 int ret;
Johan Rudholmf855a372015-01-12 15:38:05 +01002993
Adrian Hunter437db4c2016-05-04 10:38:21 +03002994 /*
2995	 * In the case of recovery, we can't expect flushing the cache to
2996	 * always work, but we give it a go and ignore errors.
2997 */
2998 mmc_flush_cache(host->card);
2999
Gwendal Grignou4e6c7172016-04-01 16:04:22 -07003000 if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
3001 mmc_can_reset(card)) {
Subhash Jadavanidff31262016-04-19 16:21:16 -07003002 mmc_host_clk_hold(host);
Gwendal Grignou4e6c7172016-04-01 16:04:22 -07003003		/* If the card accepts the RST_n signal, send it. */
3004 mmc_set_clock(host, host->f_init);
3005 host->ops->hw_reset(host);
3006 /* Set initial state and call mmc_set_ios */
3007 mmc_set_initial_state(host);
Subhash Jadavanidff31262016-04-19 16:21:16 -07003008 mmc_host_clk_release(host);
Gwendal Grignou4e6c7172016-04-01 16:04:22 -07003009 } else {
3010 /* Do a brute force power cycle */
3011 mmc_power_cycle(host, card->ocr);
3012 }
Sayali Lokhandeb2637032017-04-24 13:40:50 +05303013
3014 /* Suspend clk scaling to avoid switching frequencies intermittently */
3015
3016 ret = mmc_suspend_clk_scaling(host);
3017 if (ret) {
3018		pr_err("%s: %s: failed to suspend clock scaling (%d)\n",
3019 mmc_hostname(host), __func__, ret);
3020 return ret;
3021 }
3022
3023 ret = mmc_init_card(host, host->card->ocr, host->card);
3024 if (ret) {
3025 pr_err("%s: %s: mmc_init_card failed (%d)\n",
3026 mmc_hostname(host), __func__, ret);
3027 return ret;
3028 }
3029
3030 ret = mmc_resume_clk_scaling(host);
3031 if (ret)
3032		pr_err("%s: %s: failed to resume clock scaling (%d)\n",
3033 mmc_hostname(host), __func__, ret);
3034
3035 return ret;
Johan Rudholmf855a372015-01-12 15:38:05 +01003036}
3037
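/*
 * Shutdown/reboot path: stop clock scaling and then send a power-off
 * notification so the card knows power is about to be removed.
 */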
Vijay Viswanath7f5e93a2017-05-16 10:34:21 +05303038static int mmc_shutdown(struct mmc_host *host)
3039{
3040 struct mmc_card *card = host->card;
3041
3042 /*
3043 * Exit clock scaling so that it doesn't kick in after
3044 * power off notification is sent
3045 */
3046 if (host->caps2 & MMC_CAP2_CLK_SCALE)
3047 mmc_exit_clk_scaling(card->host);
3048 /* send power off notification */
3049 if (mmc_card_mmc(card))
3050 mmc_send_pon(card);
3051 return 0;
3052}
3053
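/*
 * Bus operations used by the MMC core for (e)MMC cards.
 */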
Adrian Hunter9feae242009-09-22 16:44:32 -07003054static const struct mmc_bus_ops mmc_ops = {
3055 .remove = mmc_remove,
3056 .detect = mmc_detect,
Adrian Hunter9feae242009-09-22 16:44:32 -07003057 .suspend = mmc_suspend,
3058 .resume = mmc_resume,
Ulf Hanssonc4d770d2013-05-02 14:02:39 +02003059 .runtime_suspend = mmc_runtime_suspend,
3060 .runtime_resume = mmc_runtime_resume,
Adrian Hunterd3049502011-11-28 16:22:00 +02003061 .alive = mmc_alive,
Sujit Reddy Thumma7095b8e2014-12-03 09:22:39 +02003062 .change_bus_speed = mmc_change_bus_speed,
Johan Rudholmf855a372015-01-12 15:38:05 +01003063 .reset = mmc_reset,
Vijay Viswanath7f5e93a2017-05-16 10:34:21 +05303064 .shutdown = mmc_shutdown,
Adrian Hunter9feae242009-09-22 16:44:32 -07003065};
3066
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003067/*
3068 * Starting point for MMC card init.
3069 */
Andy Ross807e8e42011-01-03 10:36:56 -08003070int mmc_attach_mmc(struct mmc_host *host)
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003071{
3072 int err;
Ulf Hansson69041152013-09-13 11:31:33 +02003073 u32 ocr, rocr;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003074
3075 BUG_ON(!host);
Pierre Ossmand84075c82007-08-09 13:23:56 +02003076 WARN_ON(!host->claimed);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003077
Stefan Nilsson XK44669032011-09-15 17:50:38 +02003078 /* Set correct bus mode for MMC before attempting attach */
3079 if (!mmc_host_is_spi(host))
3080 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
3081
Andy Ross807e8e42011-01-03 10:36:56 -08003082 err = mmc_send_op_cond(host, 0, &ocr);
3083 if (err)
3084 return err;
3085
Ulf Hansson2501c912013-10-30 01:00:18 +01003086 mmc_attach_bus(host, &mmc_ops);
Takashi Iwai8f230f42010-12-08 10:04:30 +01003087 if (host->ocr_avail_mmc)
3088 host->ocr_avail = host->ocr_avail_mmc;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003089
3090 /*
David Brownellaf517152007-08-08 09:11:32 -07003091 * We need to get OCR a different way for SPI.
3092 */
3093 if (mmc_host_is_spi(host)) {
3094 err = mmc_spi_read_ocr(host, 1, &ocr);
3095 if (err)
3096 goto err;
3097 }
3098
Ulf Hansson69041152013-09-13 11:31:33 +02003099 rocr = mmc_select_voltage(host, ocr);
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003100
3101 /*
3102 * Can we support the voltage of the card?
3103 */
Ulf Hansson69041152013-09-13 11:31:33 +02003104 if (!rocr) {
Pierre Ossman109b5be2007-07-23 00:12:10 +02003105 err = -EINVAL;
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003106 goto err;
Pierre Ossman109b5be2007-07-23 00:12:10 +02003107 }
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003108
3109 /*
3110 * Detect and init the card.
3111 */
Ulf Hansson69041152013-09-13 11:31:33 +02003112 err = mmc_init_card(host, rocr, NULL);
Pierre Ossman17b04292007-07-22 22:18:46 +02003113 if (err)
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003114 goto err;
3115
3116 mmc_release_host(host);
Pierre Ossman4101c162007-05-19 13:39:01 +02003117 err = mmc_add_card(host->card);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003118 if (err)
Pierre Ossman2986d0b2007-07-22 17:52:06 +02003119 goto remove_card;
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003120
Sergei Shtylyov2860d062015-10-14 23:53:03 +03003121 mmc_claim_host(host);
Talel Shenhar999102c2015-09-06 16:38:42 +03003122 err = mmc_init_clk_scaling(host);
3123 if (err) {
3124 mmc_release_host(host);
3125 goto remove_card;
3126 }
Tatyana Brokhman4fbb0bf2014-12-04 22:03:42 +02003127
3128 register_reboot_notifier(&host->card->reboot_notify);
3129
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003130 return 0;
3131
Pierre Ossman2986d0b2007-07-22 17:52:06 +02003132remove_card:
Pierre Ossman6abaa0c2007-05-01 16:00:02 +02003133 mmc_remove_card(host->card);
Pierre Ossman2986d0b2007-07-22 17:52:06 +02003134 mmc_claim_host(host);
Andy Ross807e8e42011-01-03 10:36:56 -08003135 host->card = NULL;
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003136err:
3137 mmc_detach_bus(host);
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003138
Girish K Sa3c76eb2011-10-11 11:44:09 +05303139 pr_err("%s: error %d whilst initialising MMC card\n",
Pierre Ossman109b5be2007-07-23 00:12:10 +02003140 mmc_hostname(host), err);
3141
Pierre Ossmanadf66a02007-07-22 23:08:30 +02003142 return err;
Pierre Ossman7ea239d2006-12-31 00:11:32 +01003143}