blob: d2f76e7ef43cc6aacf82b7478f8a3f0598d31053 [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -08005 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080046#define CORE_POWER 0x0
47#define CORE_SW_RST (1 << 7)
48
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070049#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_MCI_DATA_CNT 0x30
51#define CORE_MCI_STATUS 0x34
52#define CORE_MCI_FIFO_CNT 0x44
53
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
60
61#define CORE_GENERICS 0x70
62#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053063
64#define CORE_VERSION_MAJOR_MASK 0xF0000000
65#define CORE_VERSION_MAJOR_SHIFT 28
66
Asutosh Das0ef24812012-12-18 16:14:02 +053067#define CORE_HC_MODE 0x78
68#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070069#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053070
Sahitya Tummala67717bc2013-08-02 09:21:37 +053071#define CORE_MCI_VERSION 0x050
72#define CORE_TESTBUS_CONFIG 0x0CC
73#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080074#define CORE_TESTBUS_SEL2_BIT 4
75#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053076
Asutosh Das0ef24812012-12-18 16:14:02 +053077#define CORE_PWRCTL_STATUS 0xDC
78#define CORE_PWRCTL_MASK 0xE0
79#define CORE_PWRCTL_CLEAR 0xE4
80#define CORE_PWRCTL_CTL 0xE8
81
82#define CORE_PWRCTL_BUS_OFF 0x01
83#define CORE_PWRCTL_BUS_ON (1 << 1)
84#define CORE_PWRCTL_IO_LOW (1 << 2)
85#define CORE_PWRCTL_IO_HIGH (1 << 3)
86
87#define CORE_PWRCTL_BUS_SUCCESS 0x01
88#define CORE_PWRCTL_BUS_FAIL (1 << 1)
89#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
90#define CORE_PWRCTL_IO_FAIL (1 << 3)
91
92#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070093#define MAX_PHASES 16
94
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070095#define CORE_DLL_CONFIG 0x100
96#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070097#define CORE_DLL_EN (1 << 16)
98#define CORE_CDR_EN (1 << 17)
99#define CORE_CK_OUT_EN (1 << 18)
100#define CORE_CDR_EXT_EN (1 << 19)
101#define CORE_DLL_PDN (1 << 29)
102#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700103
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700104#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700105#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700106#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700107
108#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700109#define CORE_CLK_PWRSAVE (1 << 1)
110#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
111#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
112#define CORE_HC_MCLK_SEL_MASK (3 << 8)
113#define CORE_HC_AUTO_CMD21_EN (1 << 6)
114#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700115#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700116#define CORE_HC_SELECT_IN_EN (1 << 18)
117#define CORE_HC_SELECT_IN_HS400 (6 << 19)
118#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700119#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700120
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800121#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
122#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
123
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530124#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530125#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
126#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530127#define CORE_ONE_MID_EN (1 << 25)
128
Krishna Konda7feab352013-09-17 23:55:40 -0700129#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530130#define CORE_8_BIT_SUPPORT (1 << 18)
131#define CORE_3_3V_SUPPORT (1 << 24)
132#define CORE_3_0V_SUPPORT (1 << 25)
133#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300134#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700135
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800136#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530137
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700138#define CORE_CSR_CDC_CTLR_CFG0 0x130
139#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
140#define CORE_HW_AUTOCAL_ENA (1 << 17)
141
142#define CORE_CSR_CDC_CTLR_CFG1 0x134
143#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
144#define CORE_TIMER_ENA (1 << 16)
145
146#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
147#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
148#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
149#define CORE_CDC_OFFSET_CFG 0x14C
150#define CORE_CSR_CDC_DELAY_CFG 0x150
151#define CORE_CDC_SLAVE_DDA_CFG 0x160
152#define CORE_CSR_CDC_STATUS0 0x164
153#define CORE_CALIBRATION_DONE (1 << 0)
154
155#define CORE_CDC_ERROR_CODE_MASK 0x7000000
156
157#define CORE_CSR_CDC_GEN_CFG 0x178
158#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
159#define CORE_CDC_SWITCH_RC_EN (1 << 1)
160
161#define CORE_DDR_200_CFG 0x184
162#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530163#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700164#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530165
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700166#define CORE_VENDOR_SPEC3 0x1B0
167#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530168#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700169
170#define CORE_DLL_CONFIG_2 0x1B4
171#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800172#define CORE_FLL_CYCLE_CNT (1 << 18)
173#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700174
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530175#define CORE_DDR_CONFIG 0x1B8
176#define DDR_CONFIG_POR_VAL 0x80040853
177#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
178#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700179#define CORE_DDR_CONFIG_2 0x1BC
180#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700181
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700182/* 512 descriptors */
183#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530184#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530185
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700186#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800187#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700188
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700189#define INVALID_TUNING_PHASE -1
190
Krishna Konda96e6b112013-10-28 15:25:03 -0700191#define NUM_TUNING_PHASES 16
192#define MAX_DRV_TYPES_SUPPORTED_HS200 3
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200193#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700194
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700195static const u32 tuning_block_64[] = {
196 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
197 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
198 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
199 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
200};
201
202static const u32 tuning_block_128[] = {
203 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
204 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
205 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
206 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
207 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
208 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
209 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
210 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
211};
Asutosh Das0ef24812012-12-18 16:14:02 +0530212
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700213static int disable_slots;
214/* root can write, others read */
215module_param(disable_slots, int, S_IRUGO|S_IWUSR);
216
Asutosh Das0ef24812012-12-18 16:14:02 +0530217enum vdd_io_level {
218 /* set vdd_io_data->low_vol_level */
219 VDD_IO_LOW,
220 /* set vdd_io_data->high_vol_level */
221 VDD_IO_HIGH,
222 /*
223 * set whatever there in voltage_level (third argument) of
224 * sdhci_msm_set_vdd_io_vol() function.
225 */
226 VDD_IO_SET_LEVEL,
227};
228
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700229/* MSM platform specific tuning */
230static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
231 u8 poll)
232{
233 int rc = 0;
234 u32 wait_cnt = 50;
235 u8 ck_out_en = 0;
236 struct mmc_host *mmc = host->mmc;
237
238 /* poll for CK_OUT_EN bit. max. poll time = 50us */
239 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
240 CORE_CK_OUT_EN);
241
242 while (ck_out_en != poll) {
243 if (--wait_cnt == 0) {
244 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
245 mmc_hostname(mmc), __func__, poll);
246 rc = -ETIMEDOUT;
247 goto out;
248 }
249 udelay(1);
250
251 ck_out_en = !!(readl_relaxed(host->ioaddr +
252 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
253 }
254out:
255 return rc;
256}
257
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530258/*
259 * Enable CDR to track changes of DAT lines and adjust sampling
260 * point according to voltage/temperature variations
261 */
262static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
263{
264 int rc = 0;
265 u32 config;
266
267 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
268 config |= CORE_CDR_EN;
269 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
270 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
271
272 rc = msm_dll_poll_ck_out_en(host, 0);
273 if (rc)
274 goto err;
275
276 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
277 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
278
279 rc = msm_dll_poll_ck_out_en(host, 1);
280 if (rc)
281 goto err;
282 goto out;
283err:
284 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
285out:
286 return rc;
287}
288
289static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
290 *attr, const char *buf, size_t count)
291{
292 struct sdhci_host *host = dev_get_drvdata(dev);
293 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
294 struct sdhci_msm_host *msm_host = pltfm_host->priv;
295 u32 tmp;
296 unsigned long flags;
297
298 if (!kstrtou32(buf, 0, &tmp)) {
299 spin_lock_irqsave(&host->lock, flags);
300 msm_host->en_auto_cmd21 = !!tmp;
301 spin_unlock_irqrestore(&host->lock, flags);
302 }
303 return count;
304}
305
306static ssize_t show_auto_cmd21(struct device *dev,
307 struct device_attribute *attr, char *buf)
308{
309 struct sdhci_host *host = dev_get_drvdata(dev);
310 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
311 struct sdhci_msm_host *msm_host = pltfm_host->priv;
312
313 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
314}
315
316/* MSM auto-tuning handler */
317static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
318 bool enable,
319 u32 type)
320{
321 int rc = 0;
322 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
323 struct sdhci_msm_host *msm_host = pltfm_host->priv;
324 u32 val = 0;
325
326 if (!msm_host->en_auto_cmd21)
327 return 0;
328
329 if (type == MMC_SEND_TUNING_BLOCK_HS200)
330 val = CORE_HC_AUTO_CMD21_EN;
331 else
332 return 0;
333
334 if (enable) {
335 rc = msm_enable_cdr_cm_sdc4_dll(host);
336 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
337 val, host->ioaddr + CORE_VENDOR_SPEC);
338 } else {
339 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
340 ~val, host->ioaddr + CORE_VENDOR_SPEC);
341 }
342 return rc;
343}
344
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700345static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
346{
347 int rc = 0;
348 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
349 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
350 0x8};
351 unsigned long flags;
352 u32 config;
353 struct mmc_host *mmc = host->mmc;
354
355 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
356 spin_lock_irqsave(&host->lock, flags);
357
358 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
359 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
360 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
361 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
362
363 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
364 rc = msm_dll_poll_ck_out_en(host, 0);
365 if (rc)
366 goto err_out;
367
368 /*
369 * Write the selected DLL clock output phase (0 ... 15)
370 * to CDR_SELEXT bit field of DLL_CONFIG register.
371 */
372 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
373 & ~(0xF << 20))
374 | (grey_coded_phase_table[phase] << 20)),
375 host->ioaddr + CORE_DLL_CONFIG);
376
377 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
378 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
379 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
380
381 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
382 rc = msm_dll_poll_ck_out_en(host, 1);
383 if (rc)
384 goto err_out;
385
386 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
387 config |= CORE_CDR_EN;
388 config &= ~CORE_CDR_EXT_EN;
389 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
390 goto out;
391
392err_out:
393 pr_err("%s: %s: Failed to set DLL phase: %d\n",
394 mmc_hostname(mmc), __func__, phase);
395out:
396 spin_unlock_irqrestore(&host->lock, flags);
397 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
398 return rc;
399}
400
401/*
402 * Find out the greatest range of consecuitive selected
403 * DLL clock output phases that can be used as sampling
404 * setting for SD3.0 UHS-I card read operation (in SDR104
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700405 * timing mode) or for eMMC4.5 card read operation (in
406 * HS400/HS200 timing mode).
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700407 * Select the 3/4 of the range and configure the DLL with the
408 * selected DLL clock output phase.
409 */
410
411static int msm_find_most_appropriate_phase(struct sdhci_host *host,
412 u8 *phase_table, u8 total_phases)
413{
414 int ret;
415 u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
416 u8 phases_per_row[MAX_PHASES] = {0};
417 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
418 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
419 bool phase_0_found = false, phase_15_found = false;
420 struct mmc_host *mmc = host->mmc;
421
422 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
423 if (!total_phases || (total_phases > MAX_PHASES)) {
424 pr_err("%s: %s: invalid argument: total_phases=%d\n",
425 mmc_hostname(mmc), __func__, total_phases);
426 return -EINVAL;
427 }
428
429 for (cnt = 0; cnt < total_phases; cnt++) {
430 ranges[row_index][col_index] = phase_table[cnt];
431 phases_per_row[row_index] += 1;
432 col_index++;
433
434 if ((cnt + 1) == total_phases) {
435 continue;
436 /* check if next phase in phase_table is consecutive or not */
437 } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
438 row_index++;
439 col_index = 0;
440 }
441 }
442
443 if (row_index >= MAX_PHASES)
444 return -EINVAL;
445
446 /* Check if phase-0 is present in first valid window? */
447 if (!ranges[0][0]) {
448 phase_0_found = true;
449 phase_0_raw_index = 0;
450 /* Check if cycle exist between 2 valid windows */
451 for (cnt = 1; cnt <= row_index; cnt++) {
452 if (phases_per_row[cnt]) {
453 for (i = 0; i < phases_per_row[cnt]; i++) {
454 if (ranges[cnt][i] == 15) {
455 phase_15_found = true;
456 phase_15_raw_index = cnt;
457 break;
458 }
459 }
460 }
461 }
462 }
463
464 /* If 2 valid windows form cycle then merge them as single window */
465 if (phase_0_found && phase_15_found) {
466 /* number of phases in raw where phase 0 is present */
467 u8 phases_0 = phases_per_row[phase_0_raw_index];
468 /* number of phases in raw where phase 15 is present */
469 u8 phases_15 = phases_per_row[phase_15_raw_index];
470
471 if (phases_0 + phases_15 >= MAX_PHASES)
472 /*
473 * If there are more than 1 phase windows then total
474 * number of phases in both the windows should not be
475 * more than or equal to MAX_PHASES.
476 */
477 return -EINVAL;
478
479 /* Merge 2 cyclic windows */
480 i = phases_15;
481 for (cnt = 0; cnt < phases_0; cnt++) {
482 ranges[phase_15_raw_index][i] =
483 ranges[phase_0_raw_index][cnt];
484 if (++i >= MAX_PHASES)
485 break;
486 }
487
488 phases_per_row[phase_0_raw_index] = 0;
489 phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
490 }
491
492 for (cnt = 0; cnt <= row_index; cnt++) {
493 if (phases_per_row[cnt] > curr_max) {
494 curr_max = phases_per_row[cnt];
495 selected_row_index = cnt;
496 }
497 }
498
499 i = ((curr_max * 3) / 4);
500 if (i)
501 i--;
502
503 ret = (int)ranges[selected_row_index][i];
504
505 if (ret >= MAX_PHASES) {
506 ret = -EINVAL;
507 pr_err("%s: %s: invalid phase selected=%d\n",
508 mmc_hostname(mmc), __func__, ret);
509 }
510
511 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
512 return ret;
513}
514
515static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
516{
517 u32 mclk_freq = 0;
518
519 /* Program the MCLK value to MCLK_FREQ bit field */
520 if (host->clock <= 112000000)
521 mclk_freq = 0;
522 else if (host->clock <= 125000000)
523 mclk_freq = 1;
524 else if (host->clock <= 137000000)
525 mclk_freq = 2;
526 else if (host->clock <= 150000000)
527 mclk_freq = 3;
528 else if (host->clock <= 162000000)
529 mclk_freq = 4;
530 else if (host->clock <= 175000000)
531 mclk_freq = 5;
532 else if (host->clock <= 187000000)
533 mclk_freq = 6;
534 else if (host->clock <= 200000000)
535 mclk_freq = 7;
536
537 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
538 & ~(7 << 24)) | (mclk_freq << 24)),
539 host->ioaddr + CORE_DLL_CONFIG);
540}
541
542/* Initialize the DLL (Programmable Delay Line ) */
543static int msm_init_cm_dll(struct sdhci_host *host)
544{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800545 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
546 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700547 struct mmc_host *mmc = host->mmc;
548 int rc = 0;
549 unsigned long flags;
550 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530551 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700552
553 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
554 spin_lock_irqsave(&host->lock, flags);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530555 prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
556 CORE_CLK_PWRSAVE);
557 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700558 /*
559 * Make sure that clock is always enabled when DLL
560 * tuning is in progress. Keeping PWRSAVE ON may
561 * turn off the clock. So let's disable the PWRSAVE
562 * here and re-enable it once tuning is completed.
563 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530564 if (prev_pwrsave) {
565 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
566 & ~CORE_CLK_PWRSAVE),
567 host->ioaddr + CORE_VENDOR_SPEC);
568 curr_pwrsave = false;
569 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700570
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800571 if (msm_host->use_updated_dll_reset) {
572 /* Disable the DLL clock */
573 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
574 & ~CORE_CK_OUT_EN),
575 host->ioaddr + CORE_DLL_CONFIG);
576
577 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
578 | CORE_DLL_CLOCK_DISABLE),
579 host->ioaddr + CORE_DLL_CONFIG_2);
580 }
581
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700582 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
583 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
584 | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
585
586 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
587 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
588 | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
589 msm_cm_dll_set_freq(host);
590
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800591 if (msm_host->use_updated_dll_reset) {
592 u32 mclk_freq = 0;
593
594 if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
595 & CORE_FLL_CYCLE_CNT))
596 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
597 else
598 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
599
600 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
601 & ~(0xFF << 10)) | (mclk_freq << 10)),
602 host->ioaddr + CORE_DLL_CONFIG_2);
603 /* wait for 5us before enabling DLL clock */
604 udelay(5);
605 }
606
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700607 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
608 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
609 & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
610
611 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
612 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
613 & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
614
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800615 if (msm_host->use_updated_dll_reset) {
616 msm_cm_dll_set_freq(host);
617 /* Enable the DLL clock */
618 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
619 & ~CORE_DLL_CLOCK_DISABLE),
620 host->ioaddr + CORE_DLL_CONFIG_2);
621 }
622
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700623 /* Set DLL_EN bit to 1. */
624 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
625 | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
626
627 /* Set CK_OUT_EN bit to 1. */
628 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
629 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
630
631 wait_cnt = 50;
632 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
633 while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
634 CORE_DLL_LOCK)) {
635 /* max. wait for 50us sec for LOCK bit to be set */
636 if (--wait_cnt == 0) {
637 pr_err("%s: %s: DLL failed to LOCK\n",
638 mmc_hostname(mmc), __func__);
639 rc = -ETIMEDOUT;
640 goto out;
641 }
642 /* wait for 1us before polling again */
643 udelay(1);
644 }
645
646out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530647 /* Restore the correct PWRSAVE state */
648 if (prev_pwrsave ^ curr_pwrsave) {
649 u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
650
651 if (prev_pwrsave)
652 reg |= CORE_CLK_PWRSAVE;
653 else
654 reg &= ~CORE_CLK_PWRSAVE;
655
656 writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
657 }
658
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700659 spin_unlock_irqrestore(&host->lock, flags);
660 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
661 return rc;
662}
663
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700664static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
665{
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700666 u32 calib_done;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700667 int ret = 0;
668 int cdc_err = 0;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700669
670 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
671
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700672 /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
673 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
674 & ~CORE_CDC_T4_DLY_SEL),
675 host->ioaddr + CORE_DDR_200_CFG);
676
677 /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
678 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
679 & ~CORE_CDC_SWITCH_BYPASS_OFF),
680 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
681
682 /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
683 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
684 | CORE_CDC_SWITCH_RC_EN),
685 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
686
687 /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
688 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
689 & ~CORE_START_CDC_TRAFFIC),
690 host->ioaddr + CORE_DDR_200_CFG);
691
692 /*
693 * Perform CDC Register Initialization Sequence
694 *
695 * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
696 * CORE_CSR_CDC_CTLR_CFG1 0x3011111
697 * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
698 * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
699 * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
700 * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
701 * CORE_CSR_CDC_DELAY_CFG 0x3AC
702 * CORE_CDC_OFFSET_CFG 0x0
703 * CORE_CDC_SLAVE_DDA_CFG 0x16334
704 */
705
706 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
707 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
708 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
709 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
710 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
711 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
Subhash Jadavanibe406d92014-06-17 16:47:48 -0700712 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700713 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
714 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
715
716 /* CDC HW Calibration */
717
718 /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
719 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
720 | CORE_SW_TRIG_FULL_CALIB),
721 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
722
723 /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
724 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
725 & ~CORE_SW_TRIG_FULL_CALIB),
726 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
727
728 /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
729 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
730 | CORE_HW_AUTOCAL_ENA),
731 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
732
733 /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
734 writel_relaxed((readl_relaxed(host->ioaddr +
735 CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
736 host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
737
738 mb();
739
740 /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700741 ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
742 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
743
744 if (ret == -ETIMEDOUT) {
745 pr_err("%s: %s: CDC Calibration was not completed\n",
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700746 mmc_hostname(host->mmc), __func__);
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700747 goto out;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700748 }
749
750 /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
751 cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
752 & CORE_CDC_ERROR_CODE_MASK;
753 if (cdc_err) {
754 pr_err("%s: %s: CDC Error Code %d\n",
755 mmc_hostname(host->mmc), __func__, cdc_err);
756 ret = -EINVAL;
757 goto out;
758 }
759
760 /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
761 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
762 | CORE_START_CDC_TRAFFIC),
763 host->ioaddr + CORE_DDR_200_CFG);
764out:
765 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
766 __func__, ret);
767 return ret;
768}
769
/*
 * sdhci_msm_cm_dll_sdc4_calibration() - calibrate the CM_DLL_SDC4 HW DLL
 * @host: SDHCI host whose DLL is to be calibrated
 *
 * Programs the DDR config register(s), optionally enables the RCLK path
 * for enhanced-strobe cards, kicks off the DDR DLL calibration and polls
 * for the DLL lock.  Returns 0 on success, -ETIMEDOUT if the DLL never
 * locks, or another negative error from the poll helper.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		/* Newer targets: restore the power-on-reset value directly. */
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
				host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* Older targets: patch only the PRG_RCLK_DLY field. */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	/* Route the strobe line (RCLK) onto CMD for enhanced-strobe cards. */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Ensure all register writes above have completed before returning. */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
831
Ritesh Harjaniea709662015-05-27 15:40:24 +0530832static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
833{
834 int ret = 0;
835 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
836 struct sdhci_msm_host *msm_host = pltfm_host->priv;
837 struct mmc_host *mmc = host->mmc;
838
839 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
840
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530841 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
842 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530843 mmc_hostname(mmc));
844 return -EINVAL;
845 }
846
847 if (msm_host->calibration_done ||
848 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
849 return 0;
850 }
851
852 /*
853 * Reset the tuning block.
854 */
855 ret = msm_init_cm_dll(host);
856 if (ret)
857 goto out;
858
859 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
860out:
861 if (!ret)
862 msm_host->calibration_done = true;
863 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
864 __func__, ret);
865 return ret;
866}
867
/*
 * sdhci_msm_hs400_dll_calibration() - re-calibrate the DLL for HS400 mode
 * @host: SDHCI host
 *
 * Re-tuning in HS400 (DDR) mode is not possible, so instead the tuning
 * block is reset, the tuning phase saved from the last successful tuning
 * run is restored, and the controller-variant-specific DLL calibration
 * (CDCLP533 or CM_DLL_SDC4) is executed.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
905
Krishna Konda96e6b112013-10-28 15:25:03 -0700906static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
907 u8 drv_type)
908{
909 struct mmc_command cmd = {0};
910 struct mmc_request mrq = {NULL};
911 struct mmc_host *mmc = host->mmc;
912 u8 val = ((drv_type << 4) | 2);
913
914 cmd.opcode = MMC_SWITCH;
915 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
916 (EXT_CSD_HS_TIMING << 16) |
917 (val << 8) |
918 EXT_CSD_CMD_SET_NORMAL;
919 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
920 /* 1 sec */
921 cmd.busy_timeout = 1000 * 1000;
922
923 memset(cmd.resp, 0, sizeof(cmd.resp));
924 cmd.retries = 3;
925
926 mrq.cmd = &cmd;
927 cmd.data = NULL;
928
929 mmc_wait_for_req(mmc, &mrq);
930 pr_debug("%s: %s: set card drive type to %d\n",
931 mmc_hostname(mmc), __func__,
932 drv_type);
933}
934
/*
 * sdhci_msm_execute_tuning() - find and program the best CM DLL phase
 * @host:   SDHCI host
 * @opcode: tuning command opcode (CMD19/CMD21)
 *
 * Sweeps all 16 DLL phases, reading the tuning block pattern at each one,
 * and programs the "most appropriate" phase among those that passed.  Up
 * to three full sweeps are attempted.  If every phase passes on an eMMC
 * card, the card's drive strength is varied and tuning repeated until at
 * least one phase fails (so a meaningful best phase can be chosen).
 * In HS400 mode with tuning already done, only the DLL calibration is run.
 *
 * Returns 0 on success (msm_host->tuning_done is then set), negative
 * error otherwise.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios	ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the wider 128-byte tuning pattern. */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed read, poll CMD13 until the card is back in
		 * TRAN state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			/* Only try strengths the card advertises support for. */
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remembered so HS400 DLL calibration can restore it later. */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1139
Asutosh Das0ef24812012-12-18 16:14:02 +05301140static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1141{
1142 struct sdhci_msm_gpio_data *curr;
1143 int i, ret = 0;
1144
1145 curr = pdata->pin_data->gpio_data;
1146 for (i = 0; i < curr->size; i++) {
1147 if (!gpio_is_valid(curr->gpio[i].no)) {
1148 ret = -EINVAL;
1149 pr_err("%s: Invalid gpio = %d\n", __func__,
1150 curr->gpio[i].no);
1151 goto free_gpios;
1152 }
1153 if (enable) {
1154 ret = gpio_request(curr->gpio[i].no,
1155 curr->gpio[i].name);
1156 if (ret) {
1157 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1158 __func__, curr->gpio[i].no,
1159 curr->gpio[i].name, ret);
1160 goto free_gpios;
1161 }
1162 curr->gpio[i].is_enabled = true;
1163 } else {
1164 gpio_free(curr->gpio[i].no);
1165 curr->gpio[i].is_enabled = false;
1166 }
1167 }
1168 return ret;
1169
1170free_gpios:
1171 for (i--; i >= 0; i--) {
1172 gpio_free(curr->gpio[i].no);
1173 curr->gpio[i].is_enabled = false;
1174 }
1175 return ret;
1176}
1177
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301178static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1179 bool enable)
1180{
1181 int ret = 0;
1182
1183 if (enable)
1184 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1185 pdata->pctrl_data->pins_active);
1186 else
1187 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1188 pdata->pctrl_data->pins_sleep);
1189
1190 if (ret < 0)
1191 pr_err("%s state for pinctrl failed with %d\n",
1192 enable ? "Enabling" : "Disabling", ret);
1193
1194 return ret;
1195}
1196
Asutosh Das0ef24812012-12-18 16:14:02 +05301197static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1198{
1199 int ret = 0;
1200
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301201 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301202 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301203 } else if (pdata->pctrl_data) {
1204 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1205 goto out;
1206 } else if (!pdata->pin_data) {
1207 return 0;
1208 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301209
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301210 if (pdata->pin_data->is_gpio)
1211 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301212out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301213 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301214 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301215
1216 return ret;
1217}
1218
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301219static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1220 u32 **out, int *len, u32 size)
1221{
1222 int ret = 0;
1223 struct device_node *np = dev->of_node;
1224 size_t sz;
1225 u32 *arr = NULL;
1226
1227 if (!of_get_property(np, prop_name, len)) {
1228 ret = -EINVAL;
1229 goto out;
1230 }
1231 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001232 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301233 dev_err(dev, "%s invalid size\n", prop_name);
1234 ret = -EINVAL;
1235 goto out;
1236 }
1237
1238 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1239 if (!arr) {
1240 dev_err(dev, "%s failed allocating memory\n", prop_name);
1241 ret = -ENOMEM;
1242 goto out;
1243 }
1244
1245 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1246 if (ret < 0) {
1247 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1248 goto out;
1249 }
1250 *out = arr;
1251out:
1252 if (ret)
1253 *len = 0;
1254 return ret;
1255}
1256
Asutosh Das0ef24812012-12-18 16:14:02 +05301257#define MAX_PROP_SIZE 32
1258static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1259 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1260{
1261 int len, ret = 0;
1262 const __be32 *prop;
1263 char prop_name[MAX_PROP_SIZE];
1264 struct sdhci_msm_reg_data *vreg;
1265 struct device_node *np = dev->of_node;
1266
1267 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1268 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301269 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301270 return ret;
1271 }
1272
1273 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1274 if (!vreg) {
1275 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1276 ret = -ENOMEM;
1277 return ret;
1278 }
1279
1280 vreg->name = vreg_name;
1281
1282 snprintf(prop_name, MAX_PROP_SIZE,
1283 "qcom,%s-always-on", vreg_name);
1284 if (of_get_property(np, prop_name, NULL))
1285 vreg->is_always_on = true;
1286
1287 snprintf(prop_name, MAX_PROP_SIZE,
1288 "qcom,%s-lpm-sup", vreg_name);
1289 if (of_get_property(np, prop_name, NULL))
1290 vreg->lpm_sup = true;
1291
1292 snprintf(prop_name, MAX_PROP_SIZE,
1293 "qcom,%s-voltage-level", vreg_name);
1294 prop = of_get_property(np, prop_name, &len);
1295 if (!prop || (len != (2 * sizeof(__be32)))) {
1296 dev_warn(dev, "%s %s property\n",
1297 prop ? "invalid format" : "no", prop_name);
1298 } else {
1299 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1300 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1301 }
1302
1303 snprintf(prop_name, MAX_PROP_SIZE,
1304 "qcom,%s-current-level", vreg_name);
1305 prop = of_get_property(np, prop_name, &len);
1306 if (!prop || (len != (2 * sizeof(__be32)))) {
1307 dev_warn(dev, "%s %s property\n",
1308 prop ? "invalid format" : "no", prop_name);
1309 } else {
1310 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1311 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1312 }
1313
1314 *vreg_data = vreg;
1315 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1316 vreg->name, vreg->is_always_on ? "always_on," : "",
1317 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1318 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1319
1320 return ret;
1321}
1322
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301323static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1324 struct sdhci_msm_pltfm_data *pdata)
1325{
1326 struct sdhci_pinctrl_data *pctrl_data;
1327 struct pinctrl *pctrl;
1328 int ret = 0;
1329
1330 /* Try to obtain pinctrl handle */
1331 pctrl = devm_pinctrl_get(dev);
1332 if (IS_ERR(pctrl)) {
1333 ret = PTR_ERR(pctrl);
1334 goto out;
1335 }
1336 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1337 if (!pctrl_data) {
1338 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1339 ret = -ENOMEM;
1340 goto out;
1341 }
1342 pctrl_data->pctrl = pctrl;
1343 /* Look-up and keep the states handy to be used later */
1344 pctrl_data->pins_active = pinctrl_lookup_state(
1345 pctrl_data->pctrl, "active");
1346 if (IS_ERR(pctrl_data->pins_active)) {
1347 ret = PTR_ERR(pctrl_data->pins_active);
1348 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1349 goto out;
1350 }
1351 pctrl_data->pins_sleep = pinctrl_lookup_state(
1352 pctrl_data->pctrl, "sleep");
1353 if (IS_ERR(pctrl_data->pins_sleep)) {
1354 ret = PTR_ERR(pctrl_data->pins_sleep);
1355 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1356 goto out;
1357 }
1358 pdata->pctrl_data = pctrl_data;
1359out:
1360 return ret;
1361}
1362
Asutosh Das0ef24812012-12-18 16:14:02 +05301363#define GPIO_NAME_MAX_LEN 32
1364static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1365 struct sdhci_msm_pltfm_data *pdata)
1366{
1367 int ret = 0, cnt, i;
1368 struct sdhci_msm_pin_data *pin_data;
1369 struct device_node *np = dev->of_node;
1370
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301371 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1372 if (!ret) {
1373 goto out;
1374 } else if (ret == -EPROBE_DEFER) {
1375 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1376 goto out;
1377 } else {
1378 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1379 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301380 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301381 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301382 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1383 if (!pin_data) {
1384 dev_err(dev, "No memory for pin_data\n");
1385 ret = -ENOMEM;
1386 goto out;
1387 }
1388
1389 cnt = of_gpio_count(np);
1390 if (cnt > 0) {
1391 pin_data->gpio_data = devm_kzalloc(dev,
1392 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1393 if (!pin_data->gpio_data) {
1394 dev_err(dev, "No memory for gpio_data\n");
1395 ret = -ENOMEM;
1396 goto out;
1397 }
1398 pin_data->gpio_data->size = cnt;
1399 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1400 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1401
1402 if (!pin_data->gpio_data->gpio) {
1403 dev_err(dev, "No memory for gpio\n");
1404 ret = -ENOMEM;
1405 goto out;
1406 }
1407
1408 for (i = 0; i < cnt; i++) {
1409 const char *name = NULL;
1410 char result[GPIO_NAME_MAX_LEN];
1411 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1412 of_property_read_string_index(np,
1413 "qcom,gpio-names", i, &name);
1414
1415 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1416 dev_name(dev), name ? name : "?");
1417 pin_data->gpio_data->gpio[i].name = result;
1418 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1419 pin_data->gpio_data->gpio[i].name,
1420 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301421 }
1422 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301423 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301424out:
1425 if (ret)
1426 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1427 return ret;
1428}
1429
Gilad Bronerc788a672015-09-08 15:39:11 +03001430static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1431 struct sdhci_msm_pltfm_data *pdata)
1432{
1433 struct device_node *np = dev->of_node;
1434 const char *str;
1435 u32 cpu;
1436 int ret = 0;
1437 int i;
1438
1439 pdata->pm_qos_data.irq_valid = false;
1440 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1441 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1442 !strcmp(str, "affine_irq")) {
1443 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1444 }
1445
1446 /* must specify cpu for "affine_cores" type */
1447 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1448 pdata->pm_qos_data.irq_cpu = -1;
1449 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1450 if (ret) {
1451 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1452 ret);
1453 goto out;
1454 }
1455 if (cpu < 0 || cpu >= num_possible_cpus()) {
1456 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1457 __func__, cpu, num_possible_cpus());
1458 ret = -EINVAL;
1459 goto out;
1460 }
1461 pdata->pm_qos_data.irq_cpu = cpu;
1462 }
1463
1464 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1465 SDHCI_POWER_POLICY_NUM) {
1466 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1467 __func__, SDHCI_POWER_POLICY_NUM);
1468 ret = -EINVAL;
1469 goto out;
1470 }
1471
1472 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1473 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1474 &pdata->pm_qos_data.irq_latency.latency[i]);
1475
1476 pdata->pm_qos_data.irq_valid = true;
1477out:
1478 return ret;
1479}
1480
1481static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1482 struct sdhci_msm_pltfm_data *pdata)
1483{
1484 struct device_node *np = dev->of_node;
1485 u32 mask;
1486 int nr_groups;
1487 int ret;
1488 int i;
1489
1490 /* Read cpu group mapping */
1491 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1492 if (nr_groups <= 0) {
1493 ret = -EINVAL;
1494 goto out;
1495 }
1496 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1497 pdata->pm_qos_data.cpu_group_map.mask =
1498 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1499 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1500 ret = -ENOMEM;
1501 goto out;
1502 }
1503
1504 for (i = 0; i < nr_groups; i++) {
1505 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1506 i, &mask);
1507
1508 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1509 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1510 cpu_possible_mask)) {
1511 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1512 __func__, mask, i);
1513 ret = -EINVAL;
1514 goto free_res;
1515 }
1516 }
1517 return 0;
1518
1519free_res:
1520 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1521out:
1522 return ret;
1523}
1524
1525static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1526 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1527{
1528 struct device_node *np = dev->of_node;
1529 struct sdhci_msm_pm_qos_latency *values;
1530 int ret;
1531 int i;
1532 int group;
1533 int cfg;
1534
1535 ret = of_property_count_u32_elems(np, name);
1536 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1537 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1538 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1539 ret);
1540 return -EINVAL;
1541 } else if (ret < 0) {
1542 return ret;
1543 }
1544
1545 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1546 GFP_KERNEL);
1547 if (!values)
1548 return -ENOMEM;
1549
1550 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1551 group = i / SDHCI_POWER_POLICY_NUM;
1552 cfg = i % SDHCI_POWER_POLICY_NUM;
1553 of_property_read_u32_index(np, name, i,
1554 &(values[group].latency[cfg]));
1555 }
1556
1557 *latency = values;
1558 return 0;
1559}
1560
1561static void sdhci_msm_pm_qos_parse(struct device *dev,
1562 struct sdhci_msm_pltfm_data *pdata)
1563{
1564 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1565 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1566 __func__);
1567
1568 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1569 pdata->pm_qos_data.cmdq_valid =
1570 !sdhci_msm_pm_qos_parse_latency(dev,
1571 "qcom,pm-qos-cmdq-latency-us",
1572 pdata->pm_qos_data.cpu_group_map.nr_groups,
1573 &pdata->pm_qos_data.cmdq_latency);
1574 pdata->pm_qos_data.legacy_valid =
1575 !sdhci_msm_pm_qos_parse_latency(dev,
1576 "qcom,pm-qos-legacy-latency-us",
1577 pdata->pm_qos_data.cpu_group_map.nr_groups,
1578 &pdata->pm_qos_data.latency);
1579 if (!pdata->pm_qos_data.cmdq_valid &&
1580 !pdata->pm_qos_data.legacy_valid) {
1581 /* clean-up previously allocated arrays */
1582 kfree(pdata->pm_qos_data.latency);
1583 kfree(pdata->pm_qos_data.cmdq_latency);
1584 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1585 __func__);
1586 }
1587 } else {
1588 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1589 __func__);
1590 }
1591}
1592
Asutosh Das0ef24812012-12-18 16:14:02 +05301593/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001594static
1595struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1596 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301597{
1598 struct sdhci_msm_pltfm_data *pdata = NULL;
1599 struct device_node *np = dev->of_node;
1600 u32 bus_width = 0;
1601 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301602 int clk_table_len;
1603 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301604 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301605
1606 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1607 if (!pdata) {
1608 dev_err(dev, "failed to allocate memory for platform data\n");
1609 goto out;
1610 }
1611
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301612 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1613 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1614 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301615
Asutosh Das0ef24812012-12-18 16:14:02 +05301616 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1617 if (bus_width == 8)
1618 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1619 else if (bus_width == 4)
1620 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1621 else {
1622 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1623 pdata->mmc_bus_width = 0;
1624 }
1625
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001626 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1627 &msm_host->mmc->clk_scaling.freq_table,
1628 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1629 pr_debug("%s: no clock scaling frequencies were supplied\n",
1630 dev_name(dev));
1631 else if (!msm_host->mmc->clk_scaling.freq_table ||
1632 !msm_host->mmc->clk_scaling.freq_table_sz)
1633 dev_err(dev, "bad dts clock scaling frequencies\n");
1634
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301635 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1636 &clk_table, &clk_table_len, 0)) {
1637 dev_err(dev, "failed parsing supported clock rates\n");
1638 goto out;
1639 }
1640 if (!clk_table || !clk_table_len) {
1641 dev_err(dev, "Invalid clock table\n");
1642 goto out;
1643 }
1644 pdata->sup_clk_table = clk_table;
1645 pdata->sup_clk_cnt = clk_table_len;
1646
Asutosh Das0ef24812012-12-18 16:14:02 +05301647 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1648 sdhci_msm_slot_reg_data),
1649 GFP_KERNEL);
1650 if (!pdata->vreg_data) {
1651 dev_err(dev, "failed to allocate memory for vreg data\n");
1652 goto out;
1653 }
1654
1655 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1656 "vdd")) {
1657 dev_err(dev, "failed parsing vdd data\n");
1658 goto out;
1659 }
1660 if (sdhci_msm_dt_parse_vreg_info(dev,
1661 &pdata->vreg_data->vdd_io_data,
1662 "vdd-io")) {
1663 dev_err(dev, "failed parsing vdd-io data\n");
1664 goto out;
1665 }
1666
1667 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1668 dev_err(dev, "failed parsing gpio data\n");
1669 goto out;
1670 }
1671
Asutosh Das0ef24812012-12-18 16:14:02 +05301672 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1673
1674 for (i = 0; i < len; i++) {
1675 const char *name = NULL;
1676
1677 of_property_read_string_index(np,
1678 "qcom,bus-speed-mode", i, &name);
1679 if (!name)
1680 continue;
1681
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001682 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1683 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1684 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1685 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1686 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301687 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1688 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1689 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1690 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1691 pdata->caps |= MMC_CAP_1_8V_DDR
1692 | MMC_CAP_UHS_DDR50;
1693 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1694 pdata->caps |= MMC_CAP_1_2V_DDR
1695 | MMC_CAP_UHS_DDR50;
1696 }
1697
1698 if (of_get_property(np, "qcom,nonremovable", NULL))
1699 pdata->nonremovable = true;
1700
Guoping Yuf7c91332014-08-20 16:56:18 +08001701 if (of_get_property(np, "qcom,nonhotplug", NULL))
1702 pdata->nonhotplug = true;
1703
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001704 pdata->largeaddressbus =
1705 of_property_read_bool(np, "qcom,large-address-bus");
1706
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001707 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1708 msm_host->mmc->wakeup_on_idle = true;
1709
Gilad Bronerc788a672015-09-08 15:39:11 +03001710 sdhci_msm_pm_qos_parse(dev, pdata);
1711
Asutosh Das0ef24812012-12-18 16:14:02 +05301712 return pdata;
1713out:
1714 return NULL;
1715}
1716
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301717/* Returns required bandwidth in Bytes per Sec */
1718static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1719 struct mmc_ios *ios)
1720{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301721 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1722 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1723
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301724 unsigned int bw;
1725
Sahitya Tummala2886c922013-04-03 18:03:31 +05301726 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301727 /*
1728 * For DDR mode, SDCC controller clock will be at
1729 * the double rate than the actual clock that goes to card.
1730 */
1731 if (ios->bus_width == MMC_BUS_WIDTH_4)
1732 bw /= 2;
1733 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1734 bw /= 8;
1735
1736 return bw;
1737}
1738
1739static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1740 unsigned int bw)
1741{
1742 unsigned int *table = host->pdata->voting_data->bw_vecs;
1743 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1744 int i;
1745
1746 if (host->msm_bus_vote.is_max_bw_needed && bw)
1747 return host->msm_bus_vote.max_bw_vote;
1748
1749 for (i = 0; i < size; i++) {
1750 if (bw <= table[i])
1751 break;
1752 }
1753
1754 if (i && (i == size))
1755 i--;
1756
1757 return i;
1758}
1759
1760/*
1761 * This function must be called with host lock acquired.
1762 * Caller of this function should also ensure that msm bus client
1763 * handle is not null.
1764 */
1765static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1766 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301767 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301768{
1769 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
1770 int rc = 0;
1771
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301772 BUG_ON(!flags);
1773
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301774 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301775 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301776 rc = msm_bus_scale_client_update_request(
1777 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301778 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301779 if (rc) {
1780 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1781 mmc_hostname(host->mmc),
1782 msm_host->msm_bus_vote.client_handle, vote, rc);
1783 goto out;
1784 }
1785 msm_host->msm_bus_vote.curr_vote = vote;
1786 }
1787out:
1788 return rc;
1789}
1790
1791/*
1792 * Internal work. Work to set 0 bandwidth for msm bus.
1793 */
1794static void sdhci_msm_bus_work(struct work_struct *work)
1795{
1796 struct sdhci_msm_host *msm_host;
1797 struct sdhci_host *host;
1798 unsigned long flags;
1799
1800 msm_host = container_of(work, struct sdhci_msm_host,
1801 msm_bus_vote.vote_work.work);
1802 host = platform_get_drvdata(msm_host->pdev);
1803
1804 if (!msm_host->msm_bus_vote.client_handle)
1805 return;
1806
1807 spin_lock_irqsave(&host->lock, flags);
1808 /* don't vote for 0 bandwidth if any request is in progress */
1809 if (!host->mrq) {
1810 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301811 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301812 } else
1813 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1814 mmc_hostname(host->mmc), __func__);
1815 spin_unlock_irqrestore(&host->lock, flags);
1816}
1817
1818/*
1819 * This function cancels any scheduled delayed work and sets the bus
1820 * vote based on bw (bandwidth) argument.
1821 */
1822static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
1823 unsigned int bw)
1824{
1825 int vote;
1826 unsigned long flags;
1827 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1828 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1829
1830 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
1831 spin_lock_irqsave(&host->lock, flags);
1832 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301833 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301834 spin_unlock_irqrestore(&host->lock, flags);
1835}
1836
1837#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1838
1839/* This function queues a work which will set the bandwidth requiement to 0 */
1840static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
1841{
1842 unsigned long flags;
1843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1844 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1845
1846 spin_lock_irqsave(&host->lock, flags);
1847 if (msm_host->msm_bus_vote.min_bw_vote !=
1848 msm_host->msm_bus_vote.curr_vote)
1849 queue_delayed_work(system_wq,
1850 &msm_host->msm_bus_vote.vote_work,
1851 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
1852 spin_unlock_irqrestore(&host->lock, flags);
1853}
1854
1855static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
1856 struct platform_device *pdev)
1857{
1858 int rc = 0;
1859 struct msm_bus_scale_pdata *bus_pdata;
1860
1861 struct sdhci_msm_bus_voting_data *data;
1862 struct device *dev = &pdev->dev;
1863
1864 data = devm_kzalloc(dev,
1865 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
1866 if (!data) {
1867 dev_err(&pdev->dev,
1868 "%s: failed to allocate memory\n", __func__);
1869 rc = -ENOMEM;
1870 goto out;
1871 }
1872 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
1873 if (data->bus_pdata) {
1874 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
1875 &data->bw_vecs, &data->bw_vecs_size, 0);
1876 if (rc) {
1877 dev_err(&pdev->dev,
1878 "%s: Failed to get bus-bw-vectors-bps\n",
1879 __func__);
1880 goto out;
1881 }
1882 host->pdata->voting_data = data;
1883 }
1884 if (host->pdata->voting_data &&
1885 host->pdata->voting_data->bus_pdata &&
1886 host->pdata->voting_data->bw_vecs &&
1887 host->pdata->voting_data->bw_vecs_size) {
1888
1889 bus_pdata = host->pdata->voting_data->bus_pdata;
1890 host->msm_bus_vote.client_handle =
1891 msm_bus_scale_register_client(bus_pdata);
1892 if (!host->msm_bus_vote.client_handle) {
1893 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
1894 rc = -EFAULT;
1895 goto out;
1896 }
1897 /* cache the vote index for minimum and maximum bandwidth */
1898 host->msm_bus_vote.min_bw_vote =
1899 sdhci_msm_bus_get_vote_for_bw(host, 0);
1900 host->msm_bus_vote.max_bw_vote =
1901 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
1902 } else {
1903 devm_kfree(dev, data);
1904 }
1905
1906out:
1907 return rc;
1908}
1909
1910static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1911{
1912 if (host->msm_bus_vote.client_handle)
1913 msm_bus_scale_unregister_client(
1914 host->msm_bus_vote.client_handle);
1915}
1916
/*
 * Cast (enable) or drop (disable) the bus-bandwidth vote for this host.
 * Enabling votes immediately for the bandwidth implied by the current
 * ios settings; disabling either unvotes immediately (when MMC clock
 * gating provides its own delay) or defers the unvote via delayed work.
 * No-op when no bus-scaling client was registered.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
1945
Asutosh Das0ef24812012-12-18 16:14:02 +05301946/* Regulator utility functions */
1947static int sdhci_msm_vreg_init_reg(struct device *dev,
1948 struct sdhci_msm_reg_data *vreg)
1949{
1950 int ret = 0;
1951
1952 /* check if regulator is already initialized? */
1953 if (vreg->reg)
1954 goto out;
1955
1956 /* Get the regulator handle */
1957 vreg->reg = devm_regulator_get(dev, vreg->name);
1958 if (IS_ERR(vreg->reg)) {
1959 ret = PTR_ERR(vreg->reg);
1960 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1961 __func__, vreg->name, ret);
1962 goto out;
1963 }
1964
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301965 if (regulator_count_voltages(vreg->reg) > 0) {
1966 vreg->set_voltage_sup = true;
1967 /* sanity check */
1968 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1969 pr_err("%s: %s invalid constraints specified\n",
1970 __func__, vreg->name);
1971 ret = -EINVAL;
1972 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301973 }
1974
1975out:
1976 return ret;
1977}
1978
1979static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1980{
1981 if (vreg->reg)
1982 devm_regulator_put(vreg->reg);
1983}
1984
1985static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1986 *vreg, int uA_load)
1987{
1988 int ret = 0;
1989
1990 /*
1991 * regulators that do not support regulator_set_voltage also
1992 * do not support regulator_set_optimum_mode
1993 */
1994 if (vreg->set_voltage_sup) {
1995 ret = regulator_set_load(vreg->reg, uA_load);
1996 if (ret < 0)
1997 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
1998 __func__, vreg->name, uA_load, ret);
1999 else
2000 /*
2001 * regulator_set_load() can return non zero
2002 * value even for success case.
2003 */
2004 ret = 0;
2005 }
2006 return ret;
2007}
2008
2009static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2010 int min_uV, int max_uV)
2011{
2012 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302013 if (vreg->set_voltage_sup) {
2014 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2015 if (ret) {
2016 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302017 __func__, vreg->name, min_uV, max_uV, ret);
2018 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302019 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302020
2021 return ret;
2022}
2023
2024static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2025{
2026 int ret = 0;
2027
2028 /* Put regulator in HPM (high power mode) */
2029 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2030 if (ret < 0)
2031 return ret;
2032
2033 if (!vreg->is_enabled) {
2034 /* Set voltage level */
2035 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2036 vreg->high_vol_level);
2037 if (ret)
2038 return ret;
2039 }
2040 ret = regulator_enable(vreg->reg);
2041 if (ret) {
2042 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2043 __func__, vreg->name, ret);
2044 return ret;
2045 }
2046 vreg->is_enabled = true;
2047 return ret;
2048}
2049
2050static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2051{
2052 int ret = 0;
2053
2054 /* Never disable regulator marked as always_on */
2055 if (vreg->is_enabled && !vreg->is_always_on) {
2056 ret = regulator_disable(vreg->reg);
2057 if (ret) {
2058 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2059 __func__, vreg->name, ret);
2060 goto out;
2061 }
2062 vreg->is_enabled = false;
2063
2064 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2065 if (ret < 0)
2066 goto out;
2067
2068 /* Set min. voltage level to 0 */
2069 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2070 if (ret)
2071 goto out;
2072 } else if (vreg->is_enabled && vreg->is_always_on) {
2073 if (vreg->lpm_sup) {
2074 /* Put always_on regulator in LPM (low power mode) */
2075 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2076 vreg->lpm_uA);
2077 if (ret < 0)
2078 goto out;
2079 }
2080 }
2081out:
2082 return ret;
2083}
2084
2085static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2086 bool enable, bool is_init)
2087{
2088 int ret = 0, i;
2089 struct sdhci_msm_slot_reg_data *curr_slot;
2090 struct sdhci_msm_reg_data *vreg_table[2];
2091
2092 curr_slot = pdata->vreg_data;
2093 if (!curr_slot) {
2094 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2095 __func__);
2096 goto out;
2097 }
2098
2099 vreg_table[0] = curr_slot->vdd_data;
2100 vreg_table[1] = curr_slot->vdd_io_data;
2101
2102 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2103 if (vreg_table[i]) {
2104 if (enable)
2105 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2106 else
2107 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2108 if (ret)
2109 goto out;
2110 }
2111 }
2112out:
2113 return ret;
2114}
2115
2116/*
2117 * Reset vreg by ensuring it is off during probe. A call
2118 * to enable vreg is needed to balance disable vreg
2119 */
2120static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2121{
2122 int ret;
2123
2124 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2125 if (ret)
2126 return ret;
2127 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2128 return ret;
2129}
2130
/* This init function should be called only once for each SDHC slot */
/*
 * Initialize (is_init == true) or tear down (is_init == false) the
 * slot's vdd and vdd-io regulators. The deinit labels double as the
 * unwind path when the vdd-io init fails after vdd succeeded.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	/* No regulator info in platform data: nothing to (de)init */
	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			goto vdd_reg_deinit;
	}
	/* Make sure both supplies start out off (balanced enable/disable) */
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2179
2180
2181static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2182 enum vdd_io_level level,
2183 unsigned int voltage_level)
2184{
2185 int ret = 0;
2186 int set_level;
2187 struct sdhci_msm_reg_data *vdd_io_reg;
2188
2189 if (!pdata->vreg_data)
2190 return ret;
2191
2192 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2193 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2194 switch (level) {
2195 case VDD_IO_LOW:
2196 set_level = vdd_io_reg->low_vol_level;
2197 break;
2198 case VDD_IO_HIGH:
2199 set_level = vdd_io_reg->high_vol_level;
2200 break;
2201 case VDD_IO_SET_LEVEL:
2202 set_level = voltage_level;
2203 break;
2204 default:
2205 pr_err("%s: invalid argument level = %d",
2206 __func__, level);
2207 ret = -EINVAL;
2208 return ret;
2209 }
2210 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2211 set_level);
2212 }
2213 return ret;
2214}
2215
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302216void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2217{
2218 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2219 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2220
2221 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2222 mmc_hostname(host->mmc),
2223 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
2224 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
2225 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
2226}
2227
Asutosh Das0ef24812012-12-18 16:14:02 +05302228static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2229{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002230 struct sdhci_host *host = (struct sdhci_host *)data;
2231 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2232 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das0ef24812012-12-18 16:14:02 +05302233 u8 irq_status = 0;
2234 u8 irq_ack = 0;
2235 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302236 int pwr_state = 0, io_level = 0;
2237 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302238 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302239
2240 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2241 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2242 mmc_hostname(msm_host->mmc), irq, irq_status);
2243
2244 /* Clear the interrupt */
2245 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2246 /*
2247 * SDHC has core_mem and hc_mem device memory and these memory
2248 * addresses do not fall within 1KB region. Hence, any update to
2249 * core_mem address space would require an mb() to ensure this gets
2250 * completed before its next update to registers within hc_mem.
2251 */
2252 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302253 /*
2254 * There is a rare HW scenario where the first clear pulse could be
2255 * lost when actual reset and clear/read of status register is
2256 * happening at a time. Hence, retry for at least 10 times to make
2257 * sure status register is cleared. Otherwise, this will result in
2258 * a spurious power IRQ resulting in system instability.
2259 */
2260 while (irq_status &
2261 readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
2262 if (retry == 0) {
2263 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2264 mmc_hostname(host->mmc), irq_status);
2265 sdhci_msm_dump_pwr_ctrl_regs(host);
2266 BUG_ON(1);
2267 }
2268 writeb_relaxed(irq_status,
2269 (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2270 retry--;
2271 udelay(10);
2272 }
2273 if (likely(retry < 10))
2274 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2275 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302276
2277 /* Handle BUS ON/OFF*/
2278 if (irq_status & CORE_PWRCTL_BUS_ON) {
2279 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302280 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302281 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302282 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2283 VDD_IO_HIGH, 0);
2284 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302285 if (ret)
2286 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2287 else
2288 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302289
2290 pwr_state = REQ_BUS_ON;
2291 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302292 }
2293 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2294 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302295 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302296 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302297 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2298 VDD_IO_LOW, 0);
2299 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302300 if (ret)
2301 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2302 else
2303 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302304
2305 pwr_state = REQ_BUS_OFF;
2306 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302307 }
2308 /* Handle IO LOW/HIGH */
2309 if (irq_status & CORE_PWRCTL_IO_LOW) {
2310 /* Switch voltage Low */
2311 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2312 if (ret)
2313 irq_ack |= CORE_PWRCTL_IO_FAIL;
2314 else
2315 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302316
2317 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302318 }
2319 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2320 /* Switch voltage High */
2321 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2322 if (ret)
2323 irq_ack |= CORE_PWRCTL_IO_FAIL;
2324 else
2325 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302326
2327 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302328 }
2329
2330 /* ACK status to the core */
2331 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2332 /*
2333 * SDHC has core_mem and hc_mem device memory and these memory
2334 * addresses do not fall within 1KB region. Hence, any update to
2335 * core_mem address space would require an mb() to ensure this gets
2336 * completed before its next update to registers within hc_mem.
2337 */
2338 mb();
2339
Krishna Konda46fd1432014-10-30 21:13:27 -07002340 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002341 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2342 ~CORE_IO_PAD_PWR_SWITCH),
2343 host->ioaddr + CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002344 else if ((io_level & REQ_IO_LOW) ||
2345 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002346 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2347 CORE_IO_PAD_PWR_SWITCH),
2348 host->ioaddr + CORE_VENDOR_SPEC);
2349 mb();
2350
Asutosh Das0ef24812012-12-18 16:14:02 +05302351 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2352 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302353 spin_lock_irqsave(&host->lock, flags);
2354 if (pwr_state)
2355 msm_host->curr_pwr_state = pwr_state;
2356 if (io_level)
2357 msm_host->curr_io_level = io_level;
2358 complete(&msm_host->pwr_irq_completion);
2359 spin_unlock_irqrestore(&host->lock, flags);
2360
Asutosh Das0ef24812012-12-18 16:14:02 +05302361 return IRQ_HANDLED;
2362}
2363
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302364static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302365show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2366{
2367 struct sdhci_host *host = dev_get_drvdata(dev);
2368 int poll;
2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&host->lock, flags);
2372 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2373 spin_unlock_irqrestore(&host->lock, flags);
2374
2375 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2376}
2377
2378static ssize_t
2379store_polling(struct device *dev, struct device_attribute *attr,
2380 const char *buf, size_t count)
2381{
2382 struct sdhci_host *host = dev_get_drvdata(dev);
2383 int value;
2384 unsigned long flags;
2385
2386 if (!kstrtou32(buf, 0, &value)) {
2387 spin_lock_irqsave(&host->lock, flags);
2388 if (value) {
2389 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2390 mmc_detect_change(host->mmc, 0);
2391 } else {
2392 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2393 }
2394 spin_unlock_irqrestore(&host->lock, flags);
2395 }
2396 return count;
2397}
2398
2399static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302400show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2401 char *buf)
2402{
2403 struct sdhci_host *host = dev_get_drvdata(dev);
2404 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2405 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2406
2407 return snprintf(buf, PAGE_SIZE, "%u\n",
2408 msm_host->msm_bus_vote.is_max_bw_needed);
2409}
2410
2411static ssize_t
2412store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2413 const char *buf, size_t count)
2414{
2415 struct sdhci_host *host = dev_get_drvdata(dev);
2416 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2417 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2418 uint32_t value;
2419 unsigned long flags;
2420
2421 if (!kstrtou32(buf, 0, &value)) {
2422 spin_lock_irqsave(&host->lock, flags);
2423 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2424 spin_unlock_irqrestore(&host->lock, flags);
2425 }
2426 return count;
2427}
2428
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302429static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302430{
2431 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2432 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302433 unsigned long flags;
2434 bool done = false;
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302435 u32 io_sig_sts;
Asutosh Das0ef24812012-12-18 16:14:02 +05302436
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302437 spin_lock_irqsave(&host->lock, flags);
2438 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2439 mmc_hostname(host->mmc), __func__, req_type,
2440 msm_host->curr_pwr_state, msm_host->curr_io_level);
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302441 io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
2442 /*
2443 * The IRQ for request type IO High/Low will be generated when -
2444 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2445 * 2. If 1 is true and when there is a state change in 1.8V enable
2446 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2447 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2448 * layer tries to set it to 3.3V before card detection happens, the
2449 * IRQ doesn't get triggered as there is no state change in this bit.
2450 * The driver already handles this case by changing the IO voltage
2451 * level to high as part of controller power up sequence. Hence, check
2452 * for host->pwr to handle a case where IO voltage high request is
2453 * issued even before controller power up.
2454 */
2455 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2456 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2457 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2458 pr_debug("%s: do not wait for power IRQ that never comes\n",
2459 mmc_hostname(host->mmc));
2460 spin_unlock_irqrestore(&host->lock, flags);
2461 return;
2462 }
2463 }
2464
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302465 if ((req_type & msm_host->curr_pwr_state) ||
2466 (req_type & msm_host->curr_io_level))
2467 done = true;
2468 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302469
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302470 /*
2471 * This is needed here to hanlde a case where IRQ gets
2472 * triggered even before this function is called so that
2473 * x->done counter of completion gets reset. Otherwise,
2474 * next call to wait_for_completion returns immediately
2475 * without actually waiting for the IRQ to be handled.
2476 */
2477 if (done)
2478 init_completion(&msm_host->pwr_irq_completion);
2479 else
2480 wait_for_completion(&msm_host->pwr_irq_completion);
2481
2482 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2483 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302484}
2485
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002486static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2487{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302488 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2489
2490 if (enable) {
2491 config |= CORE_CDR_EN;
2492 config &= ~CORE_CDR_EXT_EN;
2493 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2494 } else {
2495 config &= ~CORE_CDR_EN;
2496 config |= CORE_CDR_EXT_EN;
2497 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2498 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002499}
2500
Asutosh Das648f9d12013-01-10 21:11:04 +05302501static unsigned int sdhci_msm_max_segs(void)
2502{
2503 return SDHCI_MSM_MAX_SEGMENTS;
2504}
2505
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302506static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302507{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302508 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2509 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302510
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302511 return msm_host->pdata->sup_clk_table[0];
2512}
2513
/*
 * Return the highest supported controller clock rate: the last entry
 * of sup_clk_table (assumed sorted ascending — consistent with the
 * walk in sdhci_msm_get_sup_clk_rate()).
 */
static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int max_clk_index = msm_host->pdata->sup_clk_cnt;

	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
}
2522
2523static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2524 u32 req_clk)
2525{
2526 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2527 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2528 unsigned int sel_clk = -1;
2529 unsigned char cnt;
2530
2531 if (req_clk < sdhci_msm_get_min_clock(host)) {
2532 sel_clk = sdhci_msm_get_min_clock(host);
2533 return sel_clk;
2534 }
2535
2536 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2537 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2538 break;
2539 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2540 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2541 break;
2542 } else {
2543 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2544 }
2545 }
2546 return sel_clk;
2547}
2548
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302549static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2550{
2551 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2552 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2553 int rc = 0;
2554
2555 if (atomic_read(&msm_host->controller_clock))
2556 return 0;
2557
2558 sdhci_msm_bus_voting(host, 1);
2559
2560 if (!IS_ERR(msm_host->pclk)) {
2561 rc = clk_prepare_enable(msm_host->pclk);
2562 if (rc) {
2563 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2564 mmc_hostname(host->mmc), __func__, rc);
2565 goto remove_vote;
2566 }
2567 }
2568
2569 rc = clk_prepare_enable(msm_host->clk);
2570 if (rc) {
2571 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2572 mmc_hostname(host->mmc), __func__, rc);
2573 goto disable_pclk;
2574 }
2575
2576 atomic_set(&msm_host->controller_clock, 1);
2577 pr_debug("%s: %s: enabled controller clock\n",
2578 mmc_hostname(host->mmc), __func__);
2579 goto out;
2580
2581disable_pclk:
2582 if (!IS_ERR(msm_host->pclk))
2583 clk_disable_unprepare(msm_host->pclk);
2584remove_vote:
2585 if (msm_host->msm_bus_vote.client_handle)
2586 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2587out:
2588 return rc;
2589}
2590
2591
2592
/*
 * sdhci_msm_prepare_clocks - gate or ungate all SDCC clocks for @host.
 * @enable: true to turn the clock tree on, false to turn it off.
 *
 * Enable path (only when clocks are currently off): refresh the bus vote,
 * turn on the controller/core clocks, then the optional bus, ff and sleep
 * clocks in that order, unwinding in reverse on any failure.
 * Disable path (only when clocks are currently on): stop SDCLK first, then
 * release the clocks in reverse order and drop the bus vote.
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable()/
 * sdhci_msm_enable_controller_clock().
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* bus_clk, ff_clk and sleep_clk are optional per platform */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* ensure the clock writes are observed before proceeding */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* stop the card clock before gating its source clocks */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
/* error unwind: release clocks in reverse order of acquisition */
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2687
/*
 * sdhci_msm_set_clock - set the card clock frequency for @host.
 * @clock: requested card clock in Hz; 0 means gate the clocks entirely.
 *
 * Handles the MSM-specific parts of a clock change: PWRSAVE (auto clock
 * gating) enable/disable, DDR/HS400 double-rate source selection, the
 * vendor-specific HC_SELECT_IN HS400 mode select, DLL lock polling and the
 * bus-bandwidth vote, before handing off to the generic sdhci_set_clock().
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	/* Re-enable pwrsave once past initialization, if gating is allowed */
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register.  Entered either after tuning completed, or for an
		 * enhanced-strobe capable card (which skips tuning), but only
		 * until CDC/DLL calibration has been performed.
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* ensure all mode-select register writes land before rate change */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2848
/*
 * sdhci_msm_set_uhs_signaling - program the UHS bus speed mode for @uhs.
 *
 * Maps the MMC timing mode to the SDHCI Host Control2 UHS mode select
 * field (HS200/HS400 are expressed as SDR104, DDR52 as DDR50), and below
 * 100MHz clears the mode select and parks the DLL in reset/power-down so
 * that tuning can be skipped and the feedback clock is used instead.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
						unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_RST),
				host->ioaddr + CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_PDN),
				host->ioaddr + CORE_DLL_CONFIG);
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
2911
/* Number of SDCC debug test-bus snapshots captured during a register dump */
#define MAX_TEST_BUS 60

/*
 * sdhci_msm_dump_vendor_regs - print MSM vendor-specific registers.
 *
 * Dumps MCI status/count registers, DLL configuration/status, ADMA error
 * addresses and the vendor FUNC2 register, then walks the controller's
 * internal test-bus mux and prints a snapshot of each selectable bus.
 * Intended for error/debug paths (invoked via sdhci_ops.dump_vendor_regs).
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	/* NOTE(review): 7*8 = 56 selections fill only debug_reg[0..55];
	 * entries 56..59 are printed as their zero-initialized value. */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
2965
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05302966/*
2967 * sdhci_msm_enhanced_strobe_mask :-
2968 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
2969 * SW should write 3 to
2970 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
2971 * The default reset value of this register is 2.
2972 */
2973static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
2974{
2975 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2976 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2977
Ritesh Harjani70e2a712015-08-25 11:34:16 +05302978 if (!msm_host->enhanced_strobe ||
2979 !mmc_card_strobe(msm_host->mmc->card)) {
2980 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05302981 mmc_hostname(host->mmc));
2982 return;
2983 }
2984
2985 if (set) {
2986 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
2987 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
2988 host->ioaddr + CORE_VENDOR_SPEC3);
2989 } else {
2990 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
2991 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
2992 host->ioaddr + CORE_VENDOR_SPEC3);
2993 }
2994}
2995
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07002996static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
2997{
2998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3000
3001 if (set) {
3002 writel_relaxed(CORE_TESTBUS_ENA,
3003 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3004 } else {
3005 u32 value;
3006
3007 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3008 value &= ~CORE_TESTBUS_ENA;
3009 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3010 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303011}
3012
Dov Levenglick9c575e22015-07-20 09:30:52 +03003013static void sdhci_msm_detect(struct sdhci_host *host, bool detected)
3014{
3015 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3016 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3017 struct mmc_host *mmc = msm_host->mmc;
3018 struct mmc_card *card = mmc->card;
3019
3020 if (detected && mmc_card_sdio(card))
3021 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3022 else
3023 mmc->pm_caps &= ~MMC_PM_KEEP_POWER;
3024}
3025
/*
 * sdhci_msm_reset_workaround - request a vendor SW reset and, if it does
 * not complete, disable the AXI wait-for-idle so the reset can proceed.
 * @enable: non-zero to assert HC_SW_RST_REQ and poll for completion;
 *          zero to clear the wait-idle-disable workaround bit again.
 *
 * Polls HC_SW_RST_REQ for up to ~100ms (10000 * 10us).  On timeout it
 * sets HC_SW_RST_WAIT_IDLE_DIS so the controller resets without waiting
 * for pending AXI transfers, and records the time in host->reset_wa_t.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		timeout = 10000;
		/* HW clears HC_SW_RST_REQ when the reset request completes */
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
					HC_SW_RST_WAIT_IDLE_DIS,
					host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3067
Gilad Broner44445992015-09-29 16:05:39 +03003068static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3069{
3070 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
3071 container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
3072
3073 if (atomic_read(&pm_qos_irq->counter))
3074 return;
3075
3076 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3077 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3078}
3079
/*
 * sdhci_msm_pm_qos_irq_vote - raise the IRQ-affine PM QoS latency request.
 *
 * Reference-counted: the first voter (or any voter after a power-policy
 * change) cancels a pending deferred unvote and programs the latency for
 * the current power policy; subsequent voters only bump the counter.
 */
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *latency =
		&msm_host->pdata->pm_qos_data.irq_latency;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
	/* Make sure to update the voting in case power policy has changed */
	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* stop a pending deferred unvote before re-asserting the request */
	cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3102
/*
 * sdhci_msm_pm_qos_irq_unvote - drop one reference on the IRQ PM QoS vote.
 * @async: when true, defer the actual unvote to a workqueue; when false,
 *         release the latency request immediately.
 *
 * A negative counter indicates an unvote without a matching vote and is
 * treated as a fatal driver bug (BUG()).  Only the last unvoter actually
 * relaxes the latency request.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	if (counter < 0) {
		pr_err("%s: counter=%d\n", __func__, counter);
		BUG();
	}
	if (counter)
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos_irq.unvote_work);
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3129
/*
 * sdhci_msm_pm_qos_irq_init - register the IRQ-affine PM QoS request.
 *
 * One-shot: returns early on subsequent calls (this gets called per
 * partition).  Affines the request either to the host IRQ or to a fixed
 * CPU from platform data, and registers it with the performance-mode
 * latency for the initialization phase.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
		msm_host->pdata->pm_qos_data.irq_req_type;
	if (msm_host->pm_qos_irq.req.type == PM_QOS_REQ_AFFINE_IRQ)
		msm_host->pm_qos_irq.req.irq = host->irq;
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
			sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;
}
3162
3163static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3164{
3165 int i;
3166 struct sdhci_msm_cpu_group_map *map =
3167 &msm_host->pdata->pm_qos_data.cpu_group_map;
3168
3169 if (cpu < 0)
3170 goto not_found;
3171
3172 for (i = 0; i < map->nr_groups; i++)
3173 if (cpumask_test_cpu(cpu, &map->mask[i]))
3174 return i;
3175
3176not_found:
3177 return -EINVAL;
3178}
3179
/*
 * sdhci_msm_pm_qos_cpu_vote - raise the PM QoS request for @cpu's group.
 * @latency: per-policy latency table to apply.
 * @cpu: CPU whose group is being voted for; ignored if not in any group.
 *
 * Reference-counted per group, mirroring sdhci_msm_pm_qos_irq_vote():
 * only the first voter (or a voter after a power-policy change) cancels
 * the deferred unvote and reprograms the latency.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	cancel_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3205
3206static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3207{
3208 struct sdhci_msm_pm_qos_group *group =
3209 container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
3210
3211 if (atomic_read(&group->counter))
3212 return;
3213
3214 group->latency = PM_QOS_DEFAULT_VALUE;
3215 pm_qos_update_request(&group->req, group->latency);
3216}
3217
/*
 * sdhci_msm_pm_qos_cpu_unvote - drop one reference on @cpu's group vote.
 * @async: when true, defer the unvote to a workqueue; when false, relax
 *         the latency request immediately.
 *
 * The atomic_dec_return() in the guard both drops the reference and
 * short-circuits the function unless this was the last voter.  It is only
 * evaluated when the group is valid, so votes and unvotes stay balanced.
 */
void sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos[group].unvote_work);
		return;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
			msm_host->pm_qos[group].latency);
}
3237
/*
 * sdhci_msm_pm_qos_cpu_init - allocate and register per-CPU-group PM QoS
 * requests from the platform cpu_group_map.
 * @latency: per-group latency tables; performance-mode values are used
 *           for the initialization phase.
 *
 * One-shot: returns early once pm_qos_group_enable is set.  On allocation
 * failure the feature silently stays disabled (best-effort).
 */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	struct sdhci_msm_pm_qos_group *group;
	int i;

	if (msm_host->pm_qos_group_enable)
		return;

	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
			GFP_KERNEL);
	if (!msm_host->pm_qos)
		return;

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_WORK(&group->unvote_work,
				sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
		cpumask_copy(&group->req.cpus_affine,
			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
		/* For initialization phase, set the performance mode latency */
		group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
			group->latency);
		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
			__func__, i,
			group->req.cpus_affine.bits[0],
			group->latency,
			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
	}
	msm_host->pm_qos_group_enable = true;
}
3275
/* MSM-specific sdhci_ops: vendor hooks wired into the generic SDHCI core */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,	/* generic SDHCI helpers */
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.detect = sdhci_msm_detect,
	.reset_workaround = sdhci_msm_reset_workaround,
};
3296
/*
 * sdhci_set_default_hw_caps - derive capability bits and per-revision
 * quirks from the SDCC core version register.
 *
 * Reads CORE_MCI_VERSION, fixes up the advertised SDHCI capabilities
 * (3.0V/1.8V/8-bit support, 64-bit system bus) based on the controller
 * major/minor revision and platform data, enables revision-specific
 * workarounds/features on msm_host, and writes the adjusted caps into
 * the vendor capabilities override register.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
		struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3384
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Hook up the command-queue (CMDQ) engine for this host.  A failed
 * cmdq_pltfm_init() is not fatal: the host simply runs without
 * MMC_CAP2_CMD_QUEUE, so the error is only logged at debug level.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (!IS_ERR(host->cq_host))
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	else
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
}
#else
/* CMDQ support not compiled in: keep the call site unconditional. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
}
#endif
3406
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003407static bool sdhci_msm_is_bootdevice(struct device *dev)
3408{
3409 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3410 strlen(saved_command_line))) {
3411 char search_string[50];
3412
3413 snprintf(search_string, ARRAY_SIZE(search_string),
3414 "androidboot.bootdevice=%s", dev_name(dev));
3415 if (strnstr(saved_command_line, search_string,
3416 strlen(saved_command_line)))
3417 return true;
3418 else
3419 return false;
3420 }
3421
3422 /*
3423 * "androidboot.bootdevice=" argument is not present then
3424 * return true as we don't know the boot device anyways.
3425 */
3426 return true;
3427}
3428
Asutosh Das0ef24812012-12-18 16:14:02 +05303429static int sdhci_msm_probe(struct platform_device *pdev)
3430{
3431 struct sdhci_host *host;
3432 struct sdhci_pltfm_host *pltfm_host;
3433 struct sdhci_msm_host *msm_host;
3434 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003435 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003436 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003437 u32 irq_status, irq_ctl;
Asutosh Das0ef24812012-12-18 16:14:02 +05303438
3439 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3440 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3441 GFP_KERNEL);
3442 if (!msm_host) {
3443 ret = -ENOMEM;
3444 goto out;
3445 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303446
3447 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3448 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3449 if (IS_ERR(host)) {
3450 ret = PTR_ERR(host);
3451 goto out;
3452 }
3453
3454 pltfm_host = sdhci_priv(host);
3455 pltfm_host->priv = msm_host;
3456 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303457 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303458
3459 /* Extract platform data */
3460 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003461 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3462 if (ret < 0) {
3463 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3464 ret);
3465 goto pltfm_free;
3466 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003467
3468 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003469 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3470 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003471 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003472 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003473
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003474 if (disable_slots & (1 << (ret - 1))) {
3475 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3476 ret);
3477 ret = -ENODEV;
3478 goto pltfm_free;
3479 }
3480
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003481 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3482 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303483 if (!msm_host->pdata) {
3484 dev_err(&pdev->dev, "DT parsing error\n");
3485 goto pltfm_free;
3486 }
3487 } else {
3488 dev_err(&pdev->dev, "No device tree node\n");
3489 goto pltfm_free;
3490 }
3491
3492 /* Setup Clocks */
3493
3494 /* Setup SDCC bus voter clock. */
3495 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3496 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3497 /* Vote for max. clk rate for max. performance */
3498 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3499 if (ret)
3500 goto pltfm_free;
3501 ret = clk_prepare_enable(msm_host->bus_clk);
3502 if (ret)
3503 goto pltfm_free;
3504 }
3505
3506 /* Setup main peripheral bus clock */
3507 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3508 if (!IS_ERR(msm_host->pclk)) {
3509 ret = clk_prepare_enable(msm_host->pclk);
3510 if (ret)
3511 goto bus_clk_disable;
3512 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303513 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303514
3515 /* Setup SDC MMC clock */
3516 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3517 if (IS_ERR(msm_host->clk)) {
3518 ret = PTR_ERR(msm_host->clk);
3519 goto pclk_disable;
3520 }
3521
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303522 /* Set to the minimum supported clock frequency */
3523 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3524 if (ret) {
3525 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303526 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303527 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303528 ret = clk_prepare_enable(msm_host->clk);
3529 if (ret)
3530 goto pclk_disable;
3531
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303532 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303533 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303534
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003535 /* Setup CDC calibration fixed feedback clock */
3536 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3537 if (!IS_ERR(msm_host->ff_clk)) {
3538 ret = clk_prepare_enable(msm_host->ff_clk);
3539 if (ret)
3540 goto clk_disable;
3541 }
3542
3543 /* Setup CDC calibration sleep clock */
3544 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3545 if (!IS_ERR(msm_host->sleep_clk)) {
3546 ret = clk_prepare_enable(msm_host->sleep_clk);
3547 if (ret)
3548 goto ff_clk_disable;
3549 }
3550
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003551 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3552
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303553 ret = sdhci_msm_bus_register(msm_host, pdev);
3554 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003555 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303556
3557 if (msm_host->msm_bus_vote.client_handle)
3558 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3559 sdhci_msm_bus_work);
3560 sdhci_msm_bus_voting(host, 1);
3561
Asutosh Das0ef24812012-12-18 16:14:02 +05303562 /* Setup regulators */
3563 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3564 if (ret) {
3565 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303566 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303567 }
3568
3569 /* Reset the core and Enable SDHC mode */
3570 core_memres = platform_get_resource_byname(pdev,
3571 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303572 if (!core_memres) {
3573 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3574 goto vreg_deinit;
3575 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303576 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3577 resource_size(core_memres));
3578
3579 if (!msm_host->core_mem) {
3580 dev_err(&pdev->dev, "Failed to remap registers\n");
3581 ret = -ENOMEM;
3582 goto vreg_deinit;
3583 }
3584
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303585 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003586 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303587 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003588 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3589 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303590
Asutosh Das0ef24812012-12-18 16:14:02 +05303591 /* Set HC_MODE_EN bit in HC_MODE register */
3592 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3593
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003594 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3595 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3596 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3597
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303598 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003599
3600 /*
3601 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3602 * be used as required later on.
3603 */
3604 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3605 CORE_IO_PAD_PWR_SWITCH_EN),
3606 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303607 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303608 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3609 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3610 * interrupt in GIC (by registering the interrupt handler), we need to
3611 * ensure that any pending power irq interrupt status is acknowledged
3612 * otherwise power irq interrupt handler would be fired prematurely.
3613 */
3614 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3615 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3616 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3617 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3618 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3619 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3620 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3621 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003622
Subhash Jadavani28137342013-05-14 17:46:43 +05303623 /*
3624 * Ensure that above writes are propogated before interrupt enablement
3625 * in GIC.
3626 */
3627 mb();
3628
3629 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303630 * Following are the deviations from SDHC spec v3.0 -
3631 * 1. Card detection is handled using separate GPIO.
3632 * 2. Bus power control is handled by interacting with PMIC.
3633 */
3634 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
3635 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303636 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03003637 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303638 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05303639 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05303640 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05303641 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05303642
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05303643 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
3644 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
3645
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003646 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003647 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
3648 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
3649 SDHCI_VENDOR_VER_SHIFT));
3650 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
3651 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
3652 /*
3653 * Add 40us delay in interrupt handler when
3654 * operating at initialization frequency(400KHz).
3655 */
3656 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
3657 /*
3658 * Set Software Reset for DAT line in Software
3659 * Reset Register (Bit 2).
3660 */
3661 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
3662 }
3663
Asutosh Das214b9662013-06-13 14:27:42 +05303664 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
3665
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003666 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003667 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
3668 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05303669 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003670 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05303671 goto vreg_deinit;
3672 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003673 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05303674 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003675 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303676 if (ret) {
3677 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003678 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05303679 goto vreg_deinit;
3680 }
3681
3682 /* Enable pwr irq interrupts */
3683 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
3684
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303685#ifdef CONFIG_MMC_CLKGATE
3686 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
3687 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
3688#endif
3689
Asutosh Das0ef24812012-12-18 16:14:02 +05303690 /* Set host capabilities */
3691 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
3692 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003693 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05303694 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05303695 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003696 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3697 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Subhash Jadavani6d472b22013-05-29 15:52:10 +05303698 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003699 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03003700 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05303701 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Asutosh Das0ef24812012-12-18 16:14:02 +05303702
3703 if (msm_host->pdata->nonremovable)
3704 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
3705
Guoping Yuf7c91332014-08-20 16:56:18 +08003706 if (msm_host->pdata->nonhotplug)
3707 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
3708
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303709 init_completion(&msm_host->pwr_irq_completion);
3710
Sahitya Tummala581df132013-03-12 14:57:46 +05303711 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05303712 /*
3713 * Set up the card detect GPIO in active configuration before
3714 * configuring it as an IRQ. Otherwise, it can be in some
3715 * weird/inconsistent state resulting in flood of interrupts.
3716 */
3717 sdhci_msm_setup_pins(msm_host->pdata, true);
3718
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05303719 /*
3720 * This delay is needed for stabilizing the card detect GPIO
3721 * line after changing the pull configs.
3722 */
3723 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05303724 ret = mmc_gpio_request_cd(msm_host->mmc,
3725 msm_host->pdata->status_gpio, 0);
3726 if (ret) {
3727 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
3728 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303729 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05303730 }
3731 }
3732
Krishna Konda7feab352013-09-17 23:55:40 -07003733 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
3734 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
3735 host->dma_mask = DMA_BIT_MASK(64);
3736 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05303737 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07003738 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05303739 host->dma_mask = DMA_BIT_MASK(32);
3740 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05303741 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05303742 } else {
3743 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
3744 }
3745
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07003746 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05303747 ret = sdhci_add_host(host);
3748 if (ret) {
3749 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05303750 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05303751 }
3752
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003753 pm_runtime_set_active(&pdev->dev);
3754 pm_runtime_enable(&pdev->dev);
3755 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
3756 pm_runtime_use_autosuspend(&pdev->dev);
3757
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303758 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
3759 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
3760 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
3761 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
3762 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
3763 ret = device_create_file(&pdev->dev,
3764 &msm_host->msm_bus_vote.max_bus_bw);
3765 if (ret)
3766 goto remove_host;
3767
Sahitya Tummala5c55b932013-06-20 14:00:18 +05303768 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
3769 msm_host->polling.show = show_polling;
3770 msm_host->polling.store = store_polling;
3771 sysfs_attr_init(&msm_host->polling.attr);
3772 msm_host->polling.attr.name = "polling";
3773 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
3774 ret = device_create_file(&pdev->dev, &msm_host->polling);
3775 if (ret)
3776 goto remove_max_bus_bw_file;
3777 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303778
3779 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
3780 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
3781 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
3782 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
3783 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
3784 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
3785 if (ret) {
3786 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
3787 mmc_hostname(host->mmc), __func__, ret);
3788 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
3789 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303790 /* Successful initialization */
3791 goto out;
3792
Sahitya Tummala5c55b932013-06-20 14:00:18 +05303793remove_max_bus_bw_file:
3794 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05303795remove_host:
3796 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003797 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05303798 sdhci_remove_host(host, dead);
3799vreg_deinit:
3800 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303801bus_unregister:
3802 if (msm_host->msm_bus_vote.client_handle)
3803 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3804 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003805sleep_clk_disable:
3806 if (!IS_ERR(msm_host->sleep_clk))
3807 clk_disable_unprepare(msm_host->sleep_clk);
3808ff_clk_disable:
3809 if (!IS_ERR(msm_host->ff_clk))
3810 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05303811clk_disable:
3812 if (!IS_ERR(msm_host->clk))
3813 clk_disable_unprepare(msm_host->clk);
3814pclk_disable:
3815 if (!IS_ERR(msm_host->pclk))
3816 clk_disable_unprepare(msm_host->pclk);
3817bus_clk_disable:
3818 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3819 clk_disable_unprepare(msm_host->bus_clk);
3820pltfm_free:
3821 sdhci_pltfm_free(pdev);
3822out:
3823 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
3824 return ret;
3825}
3826
3827static int sdhci_msm_remove(struct platform_device *pdev)
3828{
3829 struct sdhci_host *host = platform_get_drvdata(pdev);
3830 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3831 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3832 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
3833 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
3834 0xffffffff);
3835
3836 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05303837 if (!gpio_is_valid(msm_host->pdata->status_gpio))
3838 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303839 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003840 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05303841 sdhci_remove_host(host, dead);
3842 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05303843
Asutosh Das0ef24812012-12-18 16:14:02 +05303844 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303845
Pratibhasagar V9acf2642013-11-21 21:07:21 +05303846 sdhci_msm_setup_pins(pdata, true);
3847 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303848
3849 if (msm_host->msm_bus_vote.client_handle) {
3850 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3851 sdhci_msm_bus_unregister(msm_host);
3852 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303853 return 0;
3854}
3855
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003856#ifdef CONFIG_PM
3857static int sdhci_msm_runtime_suspend(struct device *dev)
3858{
3859 struct sdhci_host *host = dev_get_drvdata(dev);
3860 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3861 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003862 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003863
3864 disable_irq(host->irq);
3865 disable_irq(msm_host->pwr_irq);
3866
3867 /*
3868 * Remove the vote immediately only if clocks are off in which
3869 * case we might have queued work to remove vote but it may not
3870 * be completed before runtime suspend or system suspend.
3871 */
3872 if (!atomic_read(&msm_host->clks_on)) {
3873 if (msm_host->msm_bus_vote.client_handle)
3874 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3875 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003876 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
3877 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003878
3879 return 0;
3880}
3881
3882static int sdhci_msm_runtime_resume(struct device *dev)
3883{
3884 struct sdhci_host *host = dev_get_drvdata(dev);
3885 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3886 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003887 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003888
3889 enable_irq(msm_host->pwr_irq);
3890 enable_irq(host->irq);
3891
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003892 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
3893 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003894 return 0;
3895}
3896
3897static int sdhci_msm_suspend(struct device *dev)
3898{
3899 struct sdhci_host *host = dev_get_drvdata(dev);
3900 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3901 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003902 int ret = 0;
3903 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003904
3905 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
3906 (msm_host->mmc->slot.cd_irq >= 0))
3907 disable_irq(msm_host->mmc->slot.cd_irq);
3908
3909 if (pm_runtime_suspended(dev)) {
3910 pr_debug("%s: %s: already runtime suspended\n",
3911 mmc_hostname(host->mmc), __func__);
3912 goto out;
3913 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003914 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003915out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003916 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
3917 ktime_to_us(ktime_sub(ktime_get(), start)));
3918 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003919}
3920
3921static int sdhci_msm_resume(struct device *dev)
3922{
3923 struct sdhci_host *host = dev_get_drvdata(dev);
3924 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3925 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3926 int ret = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003927 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003928
3929 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
3930 (msm_host->mmc->slot.cd_irq >= 0))
3931 enable_irq(msm_host->mmc->slot.cd_irq);
3932
3933 if (pm_runtime_suspended(dev)) {
3934 pr_debug("%s: %s: runtime suspended, defer system resume\n",
3935 mmc_hostname(host->mmc), __func__);
3936 goto out;
3937 }
3938
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003939 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003940out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02003941 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
3942 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003943 return ret;
3944}
3945
3946static const struct dev_pm_ops sdhci_msm_pmops = {
3947 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
3948 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
3949 NULL)
3950};
3951
3952#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
3953
3954#else
3955#define SDHCI_MSM_PMOPS NULL
3956#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05303957static const struct of_device_id sdhci_msm_dt_match[] = {
3958 {.compatible = "qcom,sdhci-msm"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07003959 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05303960};
3961MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
3962
3963static struct platform_driver sdhci_msm_driver = {
3964 .probe = sdhci_msm_probe,
3965 .remove = sdhci_msm_remove,
3966 .driver = {
3967 .name = "sdhci_msm",
3968 .owner = THIS_MODULE,
3969 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003970 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05303971 },
3972};
3973
3974module_platform_driver(sdhci_msm_driver);
3975
3976MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
3977MODULE_LICENSE("GPL v2");