blob: 63b127efe0442b58983ed5494bbc767583f79524 [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Asutosh Das36c2e922015-12-01 12:19:58 +053046#define QOS_REMOVE_DELAY_MS 10
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080047#define CORE_POWER 0x0
48#define CORE_SW_RST (1 << 7)
49
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070050#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080051#define CORE_MCI_DATA_CNT 0x30
52#define CORE_MCI_STATUS 0x34
53#define CORE_MCI_FIFO_CNT 0x44
54
55#define CORE_VERSION_STEP_MASK 0x0000FFFF
56#define CORE_VERSION_MINOR_MASK 0x0FFF0000
57#define CORE_VERSION_MINOR_SHIFT 16
58#define CORE_VERSION_MAJOR_MASK 0xF0000000
59#define CORE_VERSION_MAJOR_SHIFT 28
60#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030061#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080062
63#define CORE_GENERICS 0x70
64#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053065
66#define CORE_VERSION_MAJOR_MASK 0xF0000000
67#define CORE_VERSION_MAJOR_SHIFT 28
68
Asutosh Das0ef24812012-12-18 16:14:02 +053069#define CORE_HC_MODE 0x78
70#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070071#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053072
Sahitya Tummala67717bc2013-08-02 09:21:37 +053073#define CORE_MCI_VERSION 0x050
74#define CORE_TESTBUS_CONFIG 0x0CC
75#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080076#define CORE_TESTBUS_SEL2_BIT 4
77#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053078
Asutosh Das0ef24812012-12-18 16:14:02 +053079#define CORE_PWRCTL_STATUS 0xDC
80#define CORE_PWRCTL_MASK 0xE0
81#define CORE_PWRCTL_CLEAR 0xE4
82#define CORE_PWRCTL_CTL 0xE8
83
84#define CORE_PWRCTL_BUS_OFF 0x01
85#define CORE_PWRCTL_BUS_ON (1 << 1)
86#define CORE_PWRCTL_IO_LOW (1 << 2)
87#define CORE_PWRCTL_IO_HIGH (1 << 3)
88
89#define CORE_PWRCTL_BUS_SUCCESS 0x01
90#define CORE_PWRCTL_BUS_FAIL (1 << 1)
91#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
92#define CORE_PWRCTL_IO_FAIL (1 << 3)
93
94#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070095#define MAX_PHASES 16
96
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070097#define CORE_DLL_CONFIG 0x100
98#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070099#define CORE_DLL_EN (1 << 16)
100#define CORE_CDR_EN (1 << 17)
101#define CORE_CK_OUT_EN (1 << 18)
102#define CORE_CDR_EXT_EN (1 << 19)
103#define CORE_DLL_PDN (1 << 29)
104#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700105
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700106#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700107#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700108#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700109
110#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700111#define CORE_CLK_PWRSAVE (1 << 1)
112#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
113#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
114#define CORE_HC_MCLK_SEL_MASK (3 << 8)
115#define CORE_HC_AUTO_CMD21_EN (1 << 6)
116#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700117#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700118#define CORE_HC_SELECT_IN_EN (1 << 18)
119#define CORE_HC_SELECT_IN_HS400 (6 << 19)
120#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700121#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700122
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
124#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
125
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530126#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530127#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
128#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530129#define CORE_ONE_MID_EN (1 << 25)
130
Krishna Konda7feab352013-09-17 23:55:40 -0700131#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530132#define CORE_8_BIT_SUPPORT (1 << 18)
133#define CORE_3_3V_SUPPORT (1 << 24)
134#define CORE_3_0V_SUPPORT (1 << 25)
135#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300136#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700137
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800138#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530139
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700140#define CORE_CSR_CDC_CTLR_CFG0 0x130
141#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
142#define CORE_HW_AUTOCAL_ENA (1 << 17)
143
144#define CORE_CSR_CDC_CTLR_CFG1 0x134
145#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
146#define CORE_TIMER_ENA (1 << 16)
147
148#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
149#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
150#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
151#define CORE_CDC_OFFSET_CFG 0x14C
152#define CORE_CSR_CDC_DELAY_CFG 0x150
153#define CORE_CDC_SLAVE_DDA_CFG 0x160
154#define CORE_CSR_CDC_STATUS0 0x164
155#define CORE_CALIBRATION_DONE (1 << 0)
156
157#define CORE_CDC_ERROR_CODE_MASK 0x7000000
158
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300159#define CQ_CMD_DBG_RAM 0x110
160#define CQ_CMD_DBG_RAM_WA 0x150
161#define CQ_CMD_DBG_RAM_OL 0x154
162
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700163#define CORE_CSR_CDC_GEN_CFG 0x178
164#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
165#define CORE_CDC_SWITCH_RC_EN (1 << 1)
166
167#define CORE_DDR_200_CFG 0x184
168#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530169#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700170#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530171
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700172#define CORE_VENDOR_SPEC3 0x1B0
173#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530174#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700175
176#define CORE_DLL_CONFIG_2 0x1B4
177#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800178#define CORE_FLL_CYCLE_CNT (1 << 18)
179#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700180
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530181#define CORE_DDR_CONFIG 0x1B8
182#define DDR_CONFIG_POR_VAL 0x80040853
183#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
184#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700185#define CORE_DDR_CONFIG_2 0x1BC
186#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700187
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700188/* 512 descriptors */
189#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530190#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530191
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700192#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800193#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700194
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700195#define INVALID_TUNING_PHASE -1
Ritesh Harjani42876f42015-11-17 17:46:51 +0530196#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700197
Krishna Konda96e6b112013-10-28 15:25:03 -0700198#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200199#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200200#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700201
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700202static const u32 tuning_block_64[] = {
203 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
204 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
205 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
206 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
207};
208
209static const u32 tuning_block_128[] = {
210 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
211 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
212 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
213 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
214 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
215 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
216 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
217 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
218};
Asutosh Das0ef24812012-12-18 16:14:02 +0530219
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -0700220/* global to hold each slot instance for debug */
221static struct sdhci_msm_host *sdhci_slot[2];
222
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700223static int disable_slots;
224/* root can write, others read */
225module_param(disable_slots, int, S_IRUGO|S_IWUSR);
226
Asutosh Das0ef24812012-12-18 16:14:02 +0530227enum vdd_io_level {
228 /* set vdd_io_data->low_vol_level */
229 VDD_IO_LOW,
230 /* set vdd_io_data->high_vol_level */
231 VDD_IO_HIGH,
232 /*
233 * set whatever there in voltage_level (third argument) of
234 * sdhci_msm_set_vdd_io_vol() function.
235 */
236 VDD_IO_SET_LEVEL,
237};
238
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700239/* MSM platform specific tuning */
240static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
241 u8 poll)
242{
243 int rc = 0;
244 u32 wait_cnt = 50;
245 u8 ck_out_en = 0;
246 struct mmc_host *mmc = host->mmc;
247
248 /* poll for CK_OUT_EN bit. max. poll time = 50us */
249 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
250 CORE_CK_OUT_EN);
251
252 while (ck_out_en != poll) {
253 if (--wait_cnt == 0) {
254 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
255 mmc_hostname(mmc), __func__, poll);
256 rc = -ETIMEDOUT;
257 goto out;
258 }
259 udelay(1);
260
261 ck_out_en = !!(readl_relaxed(host->ioaddr +
262 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
263 }
264out:
265 return rc;
266}
267
/*
 * msm_enable_cdr_cm_sdc4_dll - enable the Clock Data Recovery circuit
 * @host: SDHCI host
 *
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations.
 *
 * Sequence (order matters): set CDR_EN and clear CDR_EXT_EN plus
 * CK_OUT_EN, wait for CK_OUT_EN to read back 0, then set CK_OUT_EN
 * again and wait for it to read back 1.
 *
 * Return: 0 on success, or the -ETIMEDOUT propagated from
 * msm_dll_poll_ck_out_en() on failure (also logs an error).
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
			CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}
298
299static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
300 *attr, const char *buf, size_t count)
301{
302 struct sdhci_host *host = dev_get_drvdata(dev);
303 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
304 struct sdhci_msm_host *msm_host = pltfm_host->priv;
305 u32 tmp;
306 unsigned long flags;
307
308 if (!kstrtou32(buf, 0, &tmp)) {
309 spin_lock_irqsave(&host->lock, flags);
310 msm_host->en_auto_cmd21 = !!tmp;
311 spin_unlock_irqrestore(&host->lock, flags);
312 }
313 return count;
314}
315
316static ssize_t show_auto_cmd21(struct device *dev,
317 struct device_attribute *attr, char *buf)
318{
319 struct sdhci_host *host = dev_get_drvdata(dev);
320 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
321 struct sdhci_msm_host *msm_host = pltfm_host->priv;
322
323 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
324}
325
326/* MSM auto-tuning handler */
327static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
328 bool enable,
329 u32 type)
330{
331 int rc = 0;
332 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
333 struct sdhci_msm_host *msm_host = pltfm_host->priv;
334 u32 val = 0;
335
336 if (!msm_host->en_auto_cmd21)
337 return 0;
338
339 if (type == MMC_SEND_TUNING_BLOCK_HS200)
340 val = CORE_HC_AUTO_CMD21_EN;
341 else
342 return 0;
343
344 if (enable) {
345 rc = msm_enable_cdr_cm_sdc4_dll(host);
346 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
347 val, host->ioaddr + CORE_VENDOR_SPEC);
348 } else {
349 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
350 ~val, host->ioaddr + CORE_VENDOR_SPEC);
351 }
352 return rc;
353}
354
/*
 * msm_config_cm_dll_phase - program one DLL clock output phase
 * @host: SDHCI host
 * @phase: requested phase index, 0..15 (used to index the grey-code
 *         table; caller must keep it in range)
 *
 * Writes the grey-coded equivalent of @phase into the CDR_SELEXT
 * field (bits 23:20) of CORE_DLL_CONFIG following the documented
 * handshake: disable CDR/CK_OUT, wait for CK_OUT_EN==0, program the
 * phase, re-enable CK_OUT, wait for CK_OUT_EN==1, then restore CDR
 * tracking. The whole sequence runs under the host spinlock.
 *
 * Return: 0 on success, -ETIMEDOUT if either CK_OUT_EN handshake
 * times out.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	/* Maps a linear phase index to the hardware's grey-coded encoding. */
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	/* Disable CDR tracking and the output clock before reprogramming. */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Re-enable CDR tracking now that the new phase is latched. */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
410
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 *
 * @phase_table holds @total_phases passing phase indices in
 * ascending order; consecutive entries form a "window" (row).
 * Because phases wrap mod 16, a window ending at 15 and one
 * starting at 0 are merged into a single cyclic window.
 *
 * Return: the selected phase (0..15) on success, -EINVAL on bad
 * arguments or if no valid phase can be chosen.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row] = one window of consecutive phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split the sorted phase list into rows of consecutive phases. */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window ... */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* ... and select the phase at 3/4 of its width. */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
524
525static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
526{
527 u32 mclk_freq = 0;
528
529 /* Program the MCLK value to MCLK_FREQ bit field */
530 if (host->clock <= 112000000)
531 mclk_freq = 0;
532 else if (host->clock <= 125000000)
533 mclk_freq = 1;
534 else if (host->clock <= 137000000)
535 mclk_freq = 2;
536 else if (host->clock <= 150000000)
537 mclk_freq = 3;
538 else if (host->clock <= 162000000)
539 mclk_freq = 4;
540 else if (host->clock <= 175000000)
541 mclk_freq = 5;
542 else if (host->clock <= 187000000)
543 mclk_freq = 6;
544 else if (host->clock <= 200000000)
545 mclk_freq = 7;
546
547 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
548 & ~(7 << 24)) | (mclk_freq << 24)),
549 host->ioaddr + CORE_DLL_CONFIG);
550}
551
/* Initialize the DLL (Programmable Delay Line ) */
/*
 * msm_init_cm_dll - reset and lock the tuning DLL
 * @host: SDHCI host
 *
 * Runs the full DLL bring-up sequence under the host spinlock:
 * PWRSAVE is temporarily disabled so the clock cannot gate during
 * tuning, the DLL is reset (DLL_RST/DLL_PDN pulse), the frequency
 * field is programmed, and finally DLL_EN/CK_OUT_EN are set and the
 * DLL_LOCK bit is polled. Hosts with use_updated_dll_reset take the
 * extra CONFIG_2 disable/enable steps around the reset.
 *
 * Return: 0 on success, -ETIMEDOUT if DLL_LOCK never asserts (the
 * original PWRSAVE state is restored on all paths).
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				& ~CORE_CK_OUT_EN),
				host->ioaddr + CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				| CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count depends on CONFIG_2's FLL_CYCLE_CNT bit:
		 * 8 cycles per TCXO period when set, 4 otherwise.
		 */
		if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
673
/*
 * sdhci_msm_cdclp533_calibration - run CDCLP533 (CDC) hardware calibration
 * @host: SDHCI host
 *
 * Programs the Calibrated Delay Circuit register init sequence, kicks
 * a software-triggered full calibration with hardware auto-cal and the
 * cal timer enabled, then polls CALIBRATION_DONE (up to 50us) and
 * checks the CDC error code before starting CDC traffic.
 *
 * Return: 0 on success, -ETIMEDOUT if calibration never completes,
 * -EINVAL if the hardware reports a CDC error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all writes above land before polling for completion. */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
779
/*
 * sdhci_msm_cm_dll_sdc4_calibration - run CM_DLL_SDC4 (DDR) calibration
 * @host: SDHCI host
 *
 * Re-programs CORE_DDR_CONFIG (or CORE_DDR_CONFIG_2 on hosts with the
 * rclk_delay_fix) in case the bootloader changed it, optionally enables
 * CMDIN_RCLK for enhanced-strobe capable cards, sets DDR_CAL_EN and
 * polls DDR_DLL_LOCK (up to 1ms). On success, PWRSAVE_DLL is enabled
 * except on 14lpp-DLL hosts, which cannot honor the MCLK gating timing
 * it requires (see comment below).
 *
 * Return: 0 on success, -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
			host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* POR value with the PRG_RCLK_DLY field overridden to 115. */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Ensure the register writes complete before returning. */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
841
Ritesh Harjaniea709662015-05-27 15:40:24 +0530842static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
843{
844 int ret = 0;
845 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
846 struct sdhci_msm_host *msm_host = pltfm_host->priv;
847 struct mmc_host *mmc = host->mmc;
848
849 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
850
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530851 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
852 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530853 mmc_hostname(mmc));
854 return -EINVAL;
855 }
856
857 if (msm_host->calibration_done ||
858 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
859 return 0;
860 }
861
862 /*
863 * Reset the tuning block.
864 */
865 ret = msm_init_cm_dll(host);
866 if (ret)
867 goto out;
868
869 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
870out:
871 if (!ret)
872 msm_host->calibration_done = true;
873 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
874 __func__, ret);
875 return ret;
876}
877
/*
 * sdhci_msm_hs400_dll_calibration() - calibrate the DLL after switching
 * to HS400.
 *
 * Resets the tuning block, restores the tuning phase saved during HS200
 * tuning, then runs the DLL calibration appropriate for this controller
 * revision (CDCLP533 or CM_DLL_SDC4).
 *
 * Returns 0 on success or a negative errno from any of the steps.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Pick the calibration engine present on this controller version. */
	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
915
Krishna Konda96e6b112013-10-28 15:25:03 -0700916static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
917 u8 drv_type)
918{
919 struct mmc_command cmd = {0};
920 struct mmc_request mrq = {NULL};
921 struct mmc_host *mmc = host->mmc;
922 u8 val = ((drv_type << 4) | 2);
923
924 cmd.opcode = MMC_SWITCH;
925 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
926 (EXT_CSD_HS_TIMING << 16) |
927 (val << 8) |
928 EXT_CSD_CMD_SET_NORMAL;
929 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
930 /* 1 sec */
931 cmd.busy_timeout = 1000 * 1000;
932
933 memset(cmd.resp, 0, sizeof(cmd.resp));
934 cmd.retries = 3;
935
936 mrq.cmd = &cmd;
937 cmd.data = NULL;
938
939 mmc_wait_for_req(mmc, &mrq);
940 pr_debug("%s: %s: set card drive type to %d\n",
941 mmc_hostname(mmc), __func__,
942 drv_type);
943}
944
/*
 * sdhci_msm_execute_tuning() - execute the CM DLL tuning sequence.
 *
 * For SDR104/HS200/HS400 above 100MHz, sweeps all 16 DLL phases sending
 * the tuning block pattern at each, records the phases that return the
 * pattern intact, and programs the "most appropriate" passing phase.
 * If every phase passes on an eMMC card, the card's drive strength is
 * varied and tuning repeated until at least one phase fails (then the
 * default drive strength is restored). The whole sequence is retried up
 * to three times before giving up with -EIO.
 *
 * In HS400, when HS200 tuning was already done, only the HS400 DLL
 * calibration is performed instead of a full phase sweep.
 *
 * Returns 0 on success and a negative errno on failure; on success
 * msm_host->tuning_done is set and the winning phase is saved for later
 * HS400 calibration.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the wider 128-byte tuning pattern. */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On an error at this phase, poll the card with CMD13
		 * until it is back in TRAN state before trying the next
		 * phase, so a stuck card doesn't poison the sweep.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remembered so HS400 DLL calibration can restore it later. */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		/* No phase passed: rerun the whole sequence, up to 3 times. */
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1151
Asutosh Das0ef24812012-12-18 16:14:02 +05301152static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1153{
1154 struct sdhci_msm_gpio_data *curr;
1155 int i, ret = 0;
1156
1157 curr = pdata->pin_data->gpio_data;
1158 for (i = 0; i < curr->size; i++) {
1159 if (!gpio_is_valid(curr->gpio[i].no)) {
1160 ret = -EINVAL;
1161 pr_err("%s: Invalid gpio = %d\n", __func__,
1162 curr->gpio[i].no);
1163 goto free_gpios;
1164 }
1165 if (enable) {
1166 ret = gpio_request(curr->gpio[i].no,
1167 curr->gpio[i].name);
1168 if (ret) {
1169 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1170 __func__, curr->gpio[i].no,
1171 curr->gpio[i].name, ret);
1172 goto free_gpios;
1173 }
1174 curr->gpio[i].is_enabled = true;
1175 } else {
1176 gpio_free(curr->gpio[i].no);
1177 curr->gpio[i].is_enabled = false;
1178 }
1179 }
1180 return ret;
1181
1182free_gpios:
1183 for (i--; i >= 0; i--) {
1184 gpio_free(curr->gpio[i].no);
1185 curr->gpio[i].is_enabled = false;
1186 }
1187 return ret;
1188}
1189
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301190static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1191 bool enable)
1192{
1193 int ret = 0;
1194
1195 if (enable)
1196 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1197 pdata->pctrl_data->pins_active);
1198 else
1199 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1200 pdata->pctrl_data->pins_sleep);
1201
1202 if (ret < 0)
1203 pr_err("%s state for pinctrl failed with %d\n",
1204 enable ? "Enabling" : "Disabling", ret);
1205
1206 return ret;
1207}
1208
Asutosh Das0ef24812012-12-18 16:14:02 +05301209static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1210{
1211 int ret = 0;
1212
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301213 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301214 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301215 } else if (pdata->pctrl_data) {
1216 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1217 goto out;
1218 } else if (!pdata->pin_data) {
1219 return 0;
1220 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301221
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301222 if (pdata->pin_data->is_gpio)
1223 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301224out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301225 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301226 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301227
1228 return ret;
1229}
1230
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301231static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1232 u32 **out, int *len, u32 size)
1233{
1234 int ret = 0;
1235 struct device_node *np = dev->of_node;
1236 size_t sz;
1237 u32 *arr = NULL;
1238
1239 if (!of_get_property(np, prop_name, len)) {
1240 ret = -EINVAL;
1241 goto out;
1242 }
1243 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001244 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301245 dev_err(dev, "%s invalid size\n", prop_name);
1246 ret = -EINVAL;
1247 goto out;
1248 }
1249
1250 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1251 if (!arr) {
1252 dev_err(dev, "%s failed allocating memory\n", prop_name);
1253 ret = -ENOMEM;
1254 goto out;
1255 }
1256
1257 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1258 if (ret < 0) {
1259 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1260 goto out;
1261 }
1262 *out = arr;
1263out:
1264 if (ret)
1265 *len = 0;
1266 return ret;
1267}
1268
Asutosh Das0ef24812012-12-18 16:14:02 +05301269#define MAX_PROP_SIZE 32
1270static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1271 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1272{
1273 int len, ret = 0;
1274 const __be32 *prop;
1275 char prop_name[MAX_PROP_SIZE];
1276 struct sdhci_msm_reg_data *vreg;
1277 struct device_node *np = dev->of_node;
1278
1279 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1280 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301281 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301282 return ret;
1283 }
1284
1285 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1286 if (!vreg) {
1287 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1288 ret = -ENOMEM;
1289 return ret;
1290 }
1291
1292 vreg->name = vreg_name;
1293
1294 snprintf(prop_name, MAX_PROP_SIZE,
1295 "qcom,%s-always-on", vreg_name);
1296 if (of_get_property(np, prop_name, NULL))
1297 vreg->is_always_on = true;
1298
1299 snprintf(prop_name, MAX_PROP_SIZE,
1300 "qcom,%s-lpm-sup", vreg_name);
1301 if (of_get_property(np, prop_name, NULL))
1302 vreg->lpm_sup = true;
1303
1304 snprintf(prop_name, MAX_PROP_SIZE,
1305 "qcom,%s-voltage-level", vreg_name);
1306 prop = of_get_property(np, prop_name, &len);
1307 if (!prop || (len != (2 * sizeof(__be32)))) {
1308 dev_warn(dev, "%s %s property\n",
1309 prop ? "invalid format" : "no", prop_name);
1310 } else {
1311 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1312 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1313 }
1314
1315 snprintf(prop_name, MAX_PROP_SIZE,
1316 "qcom,%s-current-level", vreg_name);
1317 prop = of_get_property(np, prop_name, &len);
1318 if (!prop || (len != (2 * sizeof(__be32)))) {
1319 dev_warn(dev, "%s %s property\n",
1320 prop ? "invalid format" : "no", prop_name);
1321 } else {
1322 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1323 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1324 }
1325
1326 *vreg_data = vreg;
1327 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1328 vreg->name, vreg->is_always_on ? "always_on," : "",
1329 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1330 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1331
1332 return ret;
1333}
1334
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301335static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1336 struct sdhci_msm_pltfm_data *pdata)
1337{
1338 struct sdhci_pinctrl_data *pctrl_data;
1339 struct pinctrl *pctrl;
1340 int ret = 0;
1341
1342 /* Try to obtain pinctrl handle */
1343 pctrl = devm_pinctrl_get(dev);
1344 if (IS_ERR(pctrl)) {
1345 ret = PTR_ERR(pctrl);
1346 goto out;
1347 }
1348 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1349 if (!pctrl_data) {
1350 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1351 ret = -ENOMEM;
1352 goto out;
1353 }
1354 pctrl_data->pctrl = pctrl;
1355 /* Look-up and keep the states handy to be used later */
1356 pctrl_data->pins_active = pinctrl_lookup_state(
1357 pctrl_data->pctrl, "active");
1358 if (IS_ERR(pctrl_data->pins_active)) {
1359 ret = PTR_ERR(pctrl_data->pins_active);
1360 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1361 goto out;
1362 }
1363 pctrl_data->pins_sleep = pinctrl_lookup_state(
1364 pctrl_data->pctrl, "sleep");
1365 if (IS_ERR(pctrl_data->pins_sleep)) {
1366 ret = PTR_ERR(pctrl_data->pins_sleep);
1367 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1368 goto out;
1369 }
1370 pdata->pctrl_data = pctrl_data;
1371out:
1372 return ret;
1373}
1374
Asutosh Das0ef24812012-12-18 16:14:02 +05301375#define GPIO_NAME_MAX_LEN 32
1376static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1377 struct sdhci_msm_pltfm_data *pdata)
1378{
1379 int ret = 0, cnt, i;
1380 struct sdhci_msm_pin_data *pin_data;
1381 struct device_node *np = dev->of_node;
1382
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301383 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1384 if (!ret) {
1385 goto out;
1386 } else if (ret == -EPROBE_DEFER) {
1387 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1388 goto out;
1389 } else {
1390 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1391 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301392 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301393 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301394 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1395 if (!pin_data) {
1396 dev_err(dev, "No memory for pin_data\n");
1397 ret = -ENOMEM;
1398 goto out;
1399 }
1400
1401 cnt = of_gpio_count(np);
1402 if (cnt > 0) {
1403 pin_data->gpio_data = devm_kzalloc(dev,
1404 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1405 if (!pin_data->gpio_data) {
1406 dev_err(dev, "No memory for gpio_data\n");
1407 ret = -ENOMEM;
1408 goto out;
1409 }
1410 pin_data->gpio_data->size = cnt;
1411 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1412 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1413
1414 if (!pin_data->gpio_data->gpio) {
1415 dev_err(dev, "No memory for gpio\n");
1416 ret = -ENOMEM;
1417 goto out;
1418 }
1419
1420 for (i = 0; i < cnt; i++) {
1421 const char *name = NULL;
1422 char result[GPIO_NAME_MAX_LEN];
1423 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1424 of_property_read_string_index(np,
1425 "qcom,gpio-names", i, &name);
1426
1427 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1428 dev_name(dev), name ? name : "?");
1429 pin_data->gpio_data->gpio[i].name = result;
1430 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1431 pin_data->gpio_data->gpio[i].name,
1432 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301433 }
1434 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301435 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301436out:
1437 if (ret)
1438 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1439 return ret;
1440}
1441
#ifdef CONFIG_SMP
/*
 * On SMP kernels the PM QoS request may track whichever CPU services the
 * SDHC interrupt (PM_QOS_REQ_AFFINE_IRQ); on UP builds there is nothing
 * to affine, so the stub leaves the request type untouched.
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1450
Gilad Bronerc788a672015-09-08 15:39:11 +03001451static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1452 struct sdhci_msm_pltfm_data *pdata)
1453{
1454 struct device_node *np = dev->of_node;
1455 const char *str;
1456 u32 cpu;
1457 int ret = 0;
1458 int i;
1459
1460 pdata->pm_qos_data.irq_valid = false;
1461 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1462 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1463 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001464 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001465 }
1466
1467 /* must specify cpu for "affine_cores" type */
1468 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1469 pdata->pm_qos_data.irq_cpu = -1;
1470 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1471 if (ret) {
1472 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1473 ret);
1474 goto out;
1475 }
1476 if (cpu < 0 || cpu >= num_possible_cpus()) {
1477 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1478 __func__, cpu, num_possible_cpus());
1479 ret = -EINVAL;
1480 goto out;
1481 }
1482 pdata->pm_qos_data.irq_cpu = cpu;
1483 }
1484
1485 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1486 SDHCI_POWER_POLICY_NUM) {
1487 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1488 __func__, SDHCI_POWER_POLICY_NUM);
1489 ret = -EINVAL;
1490 goto out;
1491 }
1492
1493 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1494 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1495 &pdata->pm_qos_data.irq_latency.latency[i]);
1496
1497 pdata->pm_qos_data.irq_valid = true;
1498out:
1499 return ret;
1500}
1501
1502static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1503 struct sdhci_msm_pltfm_data *pdata)
1504{
1505 struct device_node *np = dev->of_node;
1506 u32 mask;
1507 int nr_groups;
1508 int ret;
1509 int i;
1510
1511 /* Read cpu group mapping */
1512 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1513 if (nr_groups <= 0) {
1514 ret = -EINVAL;
1515 goto out;
1516 }
1517 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1518 pdata->pm_qos_data.cpu_group_map.mask =
1519 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1520 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1521 ret = -ENOMEM;
1522 goto out;
1523 }
1524
1525 for (i = 0; i < nr_groups; i++) {
1526 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1527 i, &mask);
1528
1529 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1530 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1531 cpu_possible_mask)) {
1532 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1533 __func__, mask, i);
1534 ret = -EINVAL;
1535 goto free_res;
1536 }
1537 }
1538 return 0;
1539
1540free_res:
1541 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1542out:
1543 return ret;
1544}
1545
1546static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1547 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1548{
1549 struct device_node *np = dev->of_node;
1550 struct sdhci_msm_pm_qos_latency *values;
1551 int ret;
1552 int i;
1553 int group;
1554 int cfg;
1555
1556 ret = of_property_count_u32_elems(np, name);
1557 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1558 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1559 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1560 ret);
1561 return -EINVAL;
1562 } else if (ret < 0) {
1563 return ret;
1564 }
1565
1566 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1567 GFP_KERNEL);
1568 if (!values)
1569 return -ENOMEM;
1570
1571 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1572 group = i / SDHCI_POWER_POLICY_NUM;
1573 cfg = i % SDHCI_POWER_POLICY_NUM;
1574 of_property_read_u32_index(np, name, i,
1575 &(values[group].latency[cfg]));
1576 }
1577
1578 *latency = values;
1579 return 0;
1580}
1581
1582static void sdhci_msm_pm_qos_parse(struct device *dev,
1583 struct sdhci_msm_pltfm_data *pdata)
1584{
1585 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1586 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1587 __func__);
1588
1589 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1590 pdata->pm_qos_data.cmdq_valid =
1591 !sdhci_msm_pm_qos_parse_latency(dev,
1592 "qcom,pm-qos-cmdq-latency-us",
1593 pdata->pm_qos_data.cpu_group_map.nr_groups,
1594 &pdata->pm_qos_data.cmdq_latency);
1595 pdata->pm_qos_data.legacy_valid =
1596 !sdhci_msm_pm_qos_parse_latency(dev,
1597 "qcom,pm-qos-legacy-latency-us",
1598 pdata->pm_qos_data.cpu_group_map.nr_groups,
1599 &pdata->pm_qos_data.latency);
1600 if (!pdata->pm_qos_data.cmdq_valid &&
1601 !pdata->pm_qos_data.legacy_valid) {
1602 /* clean-up previously allocated arrays */
1603 kfree(pdata->pm_qos_data.latency);
1604 kfree(pdata->pm_qos_data.cmdq_latency);
1605 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1606 __func__);
1607 }
1608 } else {
1609 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1610 __func__);
1611 }
1612}
1613
Asutosh Das0ef24812012-12-18 16:14:02 +05301614/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001615static
1616struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1617 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301618{
1619 struct sdhci_msm_pltfm_data *pdata = NULL;
1620 struct device_node *np = dev->of_node;
1621 u32 bus_width = 0;
1622 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301623 int clk_table_len;
1624 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301625 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301626
1627 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1628 if (!pdata) {
1629 dev_err(dev, "failed to allocate memory for platform data\n");
1630 goto out;
1631 }
1632
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301633 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1634 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1635 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301636
Asutosh Das0ef24812012-12-18 16:14:02 +05301637 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1638 if (bus_width == 8)
1639 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1640 else if (bus_width == 4)
1641 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1642 else {
1643 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1644 pdata->mmc_bus_width = 0;
1645 }
1646
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001647 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1648 &msm_host->mmc->clk_scaling.freq_table,
1649 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1650 pr_debug("%s: no clock scaling frequencies were supplied\n",
1651 dev_name(dev));
1652 else if (!msm_host->mmc->clk_scaling.freq_table ||
1653 !msm_host->mmc->clk_scaling.freq_table_sz)
1654 dev_err(dev, "bad dts clock scaling frequencies\n");
1655
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301656 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1657 &clk_table, &clk_table_len, 0)) {
1658 dev_err(dev, "failed parsing supported clock rates\n");
1659 goto out;
1660 }
1661 if (!clk_table || !clk_table_len) {
1662 dev_err(dev, "Invalid clock table\n");
1663 goto out;
1664 }
1665 pdata->sup_clk_table = clk_table;
1666 pdata->sup_clk_cnt = clk_table_len;
1667
Asutosh Das0ef24812012-12-18 16:14:02 +05301668 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1669 sdhci_msm_slot_reg_data),
1670 GFP_KERNEL);
1671 if (!pdata->vreg_data) {
1672 dev_err(dev, "failed to allocate memory for vreg data\n");
1673 goto out;
1674 }
1675
1676 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1677 "vdd")) {
1678 dev_err(dev, "failed parsing vdd data\n");
1679 goto out;
1680 }
1681 if (sdhci_msm_dt_parse_vreg_info(dev,
1682 &pdata->vreg_data->vdd_io_data,
1683 "vdd-io")) {
1684 dev_err(dev, "failed parsing vdd-io data\n");
1685 goto out;
1686 }
1687
1688 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1689 dev_err(dev, "failed parsing gpio data\n");
1690 goto out;
1691 }
1692
Asutosh Das0ef24812012-12-18 16:14:02 +05301693 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1694
1695 for (i = 0; i < len; i++) {
1696 const char *name = NULL;
1697
1698 of_property_read_string_index(np,
1699 "qcom,bus-speed-mode", i, &name);
1700 if (!name)
1701 continue;
1702
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001703 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1704 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1705 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1706 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1707 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301708 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1709 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1710 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1711 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1712 pdata->caps |= MMC_CAP_1_8V_DDR
1713 | MMC_CAP_UHS_DDR50;
1714 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1715 pdata->caps |= MMC_CAP_1_2V_DDR
1716 | MMC_CAP_UHS_DDR50;
1717 }
1718
1719 if (of_get_property(np, "qcom,nonremovable", NULL))
1720 pdata->nonremovable = true;
1721
Guoping Yuf7c91332014-08-20 16:56:18 +08001722 if (of_get_property(np, "qcom,nonhotplug", NULL))
1723 pdata->nonhotplug = true;
1724
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001725 pdata->largeaddressbus =
1726 of_property_read_bool(np, "qcom,large-address-bus");
1727
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001728 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1729 msm_host->mmc->wakeup_on_idle = true;
1730
Gilad Bronerc788a672015-09-08 15:39:11 +03001731 sdhci_msm_pm_qos_parse(dev, pdata);
1732
Pavan Anamula5a256df2015-10-16 14:38:28 +05301733 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1734 pdata->core_3_0v_support = true;
1735
Asutosh Das0ef24812012-12-18 16:14:02 +05301736 return pdata;
1737out:
1738 return NULL;
1739}
1740
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301741/* Returns required bandwidth in Bytes per Sec */
1742static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1743 struct mmc_ios *ios)
1744{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301745 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1746 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1747
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301748 unsigned int bw;
1749
Sahitya Tummala2886c922013-04-03 18:03:31 +05301750 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301751 /*
1752 * For DDR mode, SDCC controller clock will be at
1753 * the double rate than the actual clock that goes to card.
1754 */
1755 if (ios->bus_width == MMC_BUS_WIDTH_4)
1756 bw /= 2;
1757 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1758 bw /= 8;
1759
1760 return bw;
1761}
1762
1763static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1764 unsigned int bw)
1765{
1766 unsigned int *table = host->pdata->voting_data->bw_vecs;
1767 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1768 int i;
1769
1770 if (host->msm_bus_vote.is_max_bw_needed && bw)
1771 return host->msm_bus_vote.max_bw_vote;
1772
1773 for (i = 0; i < size; i++) {
1774 if (bw <= table[i])
1775 break;
1776 }
1777
1778 if (i && (i == size))
1779 i--;
1780
1781 return i;
1782}
1783
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * Applies @vote to the msm bus-scale client if it differs from the
 * cached current vote. host->lock is dropped across
 * msm_bus_scale_client_update_request() — presumably because that call
 * can block (hence the unlock/relock) — and *@flags is the caller's
 * saved IRQ state, refreshed when the lock is re-taken.
 *
 * Returns 0 on success or the msm-bus error code; the cached vote is
 * only updated on success.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					     int vote,
					     unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
1814
1815/*
1816 * Internal work. Work to set 0 bandwidth for msm bus.
1817 */
1818static void sdhci_msm_bus_work(struct work_struct *work)
1819{
1820 struct sdhci_msm_host *msm_host;
1821 struct sdhci_host *host;
1822 unsigned long flags;
1823
1824 msm_host = container_of(work, struct sdhci_msm_host,
1825 msm_bus_vote.vote_work.work);
1826 host = platform_get_drvdata(msm_host->pdev);
1827
1828 if (!msm_host->msm_bus_vote.client_handle)
1829 return;
1830
1831 spin_lock_irqsave(&host->lock, flags);
1832 /* don't vote for 0 bandwidth if any request is in progress */
1833 if (!host->mrq) {
1834 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301835 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301836 } else
1837 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1838 mmc_hostname(host->mmc), __func__);
1839 spin_unlock_irqrestore(&host->lock, flags);
1840}
1841
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 *
 * The pending 0-bandwidth vote work is flushed synchronously first so
 * it cannot undo the vote applied here. Note that
 * sdhci_msm_bus_set_vote() may drop and re-take host->lock internally.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
1860
1861#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1862
1863/* This function queues a work which will set the bandwidth requiement to 0 */
1864static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
1865{
1866 unsigned long flags;
1867 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1868 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1869
1870 spin_lock_irqsave(&host->lock, flags);
1871 if (msm_host->msm_bus_vote.min_bw_vote !=
1872 msm_host->msm_bus_vote.curr_vote)
1873 queue_delayed_work(system_wq,
1874 &msm_host->msm_bus_vote.vote_work,
1875 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
1876 spin_unlock_irqrestore(&host->lock, flags);
1877}
1878
/*
 * sdhci_msm_bus_register - set up msm bus-scaling voting for this host.
 *
 * Fetches the bus-scale platform data and the "qcom,bus-bw-vectors-bps"
 * DT table, registers a bus-scale client, and caches the vote indices
 * for minimum (bw 0) and maximum (bw UINT_MAX) bandwidth. If any of the
 * voting inputs is absent, the helper allocation is released and the
 * host simply runs without bus voting (rc stays 0). Allocation uses
 * devm_*, so error paths need no explicit free.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* voting disabled: hand the unused allocation back */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
1933
1934static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1935{
1936 if (host->msm_bus_vote.client_handle)
1937 msm_bus_scale_unregister_client(
1938 host->msm_bus_vote.client_handle);
1939}
1940
/*
 * sdhci_msm_bus_voting - raise, or schedule removal of, the bus vote.
 *
 * @enable non-zero: vote immediately for the bandwidth implied by the
 * current ios. @enable zero: drop the vote right away when MMC clock
 * gating is active (gating already delays the actual clock-off), else
 * defer the drop via the delayed vote work. No-op when no bus-scale
 * client is registered.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
1969
Asutosh Das0ef24812012-12-18 16:14:02 +05301970/* Regulator utility functions */
1971static int sdhci_msm_vreg_init_reg(struct device *dev,
1972 struct sdhci_msm_reg_data *vreg)
1973{
1974 int ret = 0;
1975
1976 /* check if regulator is already initialized? */
1977 if (vreg->reg)
1978 goto out;
1979
1980 /* Get the regulator handle */
1981 vreg->reg = devm_regulator_get(dev, vreg->name);
1982 if (IS_ERR(vreg->reg)) {
1983 ret = PTR_ERR(vreg->reg);
1984 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1985 __func__, vreg->name, ret);
1986 goto out;
1987 }
1988
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301989 if (regulator_count_voltages(vreg->reg) > 0) {
1990 vreg->set_voltage_sup = true;
1991 /* sanity check */
1992 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1993 pr_err("%s: %s invalid constraints specified\n",
1994 __func__, vreg->name);
1995 ret = -EINVAL;
1996 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301997 }
1998
1999out:
2000 return ret;
2001}
2002
/* Release the regulator handle obtained by sdhci_msm_vreg_init_reg() */
static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
{
	if (vreg->reg)
		devm_regulator_put(vreg->reg);
}
2008
2009static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2010 *vreg, int uA_load)
2011{
2012 int ret = 0;
2013
2014 /*
2015 * regulators that do not support regulator_set_voltage also
2016 * do not support regulator_set_optimum_mode
2017 */
2018 if (vreg->set_voltage_sup) {
2019 ret = regulator_set_load(vreg->reg, uA_load);
2020 if (ret < 0)
2021 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2022 __func__, vreg->name, uA_load, ret);
2023 else
2024 /*
2025 * regulator_set_load() can return non zero
2026 * value even for success case.
2027 */
2028 ret = 0;
2029 }
2030 return ret;
2031}
2032
2033static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2034 int min_uV, int max_uV)
2035{
2036 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302037 if (vreg->set_voltage_sup) {
2038 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2039 if (ret) {
2040 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302041 __func__, vreg->name, min_uV, max_uV, ret);
2042 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302043 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302044
2045 return ret;
2046}
2047
2048static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2049{
2050 int ret = 0;
2051
2052 /* Put regulator in HPM (high power mode) */
2053 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2054 if (ret < 0)
2055 return ret;
2056
2057 if (!vreg->is_enabled) {
2058 /* Set voltage level */
2059 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2060 vreg->high_vol_level);
2061 if (ret)
2062 return ret;
2063 }
2064 ret = regulator_enable(vreg->reg);
2065 if (ret) {
2066 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2067 __func__, vreg->name, ret);
2068 return ret;
2069 }
2070 vreg->is_enabled = true;
2071 return ret;
2072}
2073
/*
 * Power down @vreg. Regular regulators are disabled, their load vote is
 * removed, and the allowed voltage range is widened to
 * [0, high_vol_level]. Regulators marked always_on are never disabled;
 * when they support a low-power load (lpm_sup/lpm_uA) they are merely
 * dropped to LPM instead. Returns 0 on success or the first error.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2108
2109static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2110 bool enable, bool is_init)
2111{
2112 int ret = 0, i;
2113 struct sdhci_msm_slot_reg_data *curr_slot;
2114 struct sdhci_msm_reg_data *vreg_table[2];
2115
2116 curr_slot = pdata->vreg_data;
2117 if (!curr_slot) {
2118 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2119 __func__);
2120 goto out;
2121 }
2122
2123 vreg_table[0] = curr_slot->vdd_data;
2124 vreg_table[1] = curr_slot->vdd_io_data;
2125
2126 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2127 if (vreg_table[i]) {
2128 if (enable)
2129 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2130 else
2131 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2132 if (ret)
2133 goto out;
2134 }
2135 }
2136out:
2137 return ret;
2138}
2139
2140/*
2141 * Reset vreg by ensuring it is off during probe. A call
2142 * to enable vreg is needed to balance disable vreg
2143 */
2144static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2145{
2146 int ret;
2147
2148 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2149 if (ret)
2150 return ret;
2151 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2152 return ret;
2153}
2154
/* This init function should be called only once for each SDHC slot */
/*
 * With @is_init true: acquire the vdd and vdd-io regulator handles and
 * cycle them off via sdhci_msm_vreg_reset(). With @is_init false:
 * release both handles (teardown path). On a vdd-io init failure the
 * already-acquired vdd handle is released through the goto ladder.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			goto vdd_reg_deinit;
	}
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2203
2204
2205static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2206 enum vdd_io_level level,
2207 unsigned int voltage_level)
2208{
2209 int ret = 0;
2210 int set_level;
2211 struct sdhci_msm_reg_data *vdd_io_reg;
2212
2213 if (!pdata->vreg_data)
2214 return ret;
2215
2216 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2217 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2218 switch (level) {
2219 case VDD_IO_LOW:
2220 set_level = vdd_io_reg->low_vol_level;
2221 break;
2222 case VDD_IO_HIGH:
2223 set_level = vdd_io_reg->high_vol_level;
2224 break;
2225 case VDD_IO_SET_LEVEL:
2226 set_level = voltage_level;
2227 break;
2228 default:
2229 pr_err("%s: invalid argument level = %d",
2230 __func__, level);
2231 ret = -EINVAL;
2232 return ret;
2233 }
2234 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2235 set_level);
2236 }
2237 return ret;
2238}
2239
Ritesh Harjani42876f42015-11-17 17:46:51 +05302240/*
2241 * Acquire spin-lock host->lock before calling this function
2242 */
2243static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2244 bool enable)
2245{
2246 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2247 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2248
2249 if (enable && !msm_host->is_sdiowakeup_enabled)
2250 enable_irq(msm_host->pdata->sdiowakeup_irq);
2251 else if (!enable && msm_host->is_sdiowakeup_enabled)
2252 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2253 else
2254 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2255 __func__, enable, msm_host->is_sdiowakeup_enabled);
2256 msm_host->is_sdiowakeup_enabled = enable;
2257}
2258
/*
 * ISR for the SDIO wakeup GPIO IRQ: masks further wakeup interrupts
 * under host->lock and flags that SDIO wakeup processing is pending
 * (presumably consumed by the runtime PM/resume path — not visible in
 * this file chunk).
 */
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned long flags;

	pr_debug("%s: irq (%d) received\n", __func__, irq);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->sdio_pending_processing = true;

	return IRQ_HANDLED;
}
2276
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302277void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2278{
2279 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2280 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2281
2282 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2283 mmc_hostname(host->mmc),
2284 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
2285 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
2286 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
2287}
2288
Asutosh Das0ef24812012-12-18 16:14:02 +05302289static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2290{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002291 struct sdhci_host *host = (struct sdhci_host *)data;
2292 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2293 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das0ef24812012-12-18 16:14:02 +05302294 u8 irq_status = 0;
2295 u8 irq_ack = 0;
2296 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302297 int pwr_state = 0, io_level = 0;
2298 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302299 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302300
2301 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2302 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2303 mmc_hostname(msm_host->mmc), irq, irq_status);
2304
2305 /* Clear the interrupt */
2306 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2307 /*
2308 * SDHC has core_mem and hc_mem device memory and these memory
2309 * addresses do not fall within 1KB region. Hence, any update to
2310 * core_mem address space would require an mb() to ensure this gets
2311 * completed before its next update to registers within hc_mem.
2312 */
2313 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302314 /*
2315 * There is a rare HW scenario where the first clear pulse could be
2316 * lost when actual reset and clear/read of status register is
2317 * happening at a time. Hence, retry for at least 10 times to make
2318 * sure status register is cleared. Otherwise, this will result in
2319 * a spurious power IRQ resulting in system instability.
2320 */
2321 while (irq_status &
2322 readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
2323 if (retry == 0) {
2324 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2325 mmc_hostname(host->mmc), irq_status);
2326 sdhci_msm_dump_pwr_ctrl_regs(host);
2327 BUG_ON(1);
2328 }
2329 writeb_relaxed(irq_status,
2330 (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2331 retry--;
2332 udelay(10);
2333 }
2334 if (likely(retry < 10))
2335 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2336 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302337
2338 /* Handle BUS ON/OFF*/
2339 if (irq_status & CORE_PWRCTL_BUS_ON) {
2340 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302341 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302342 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302343 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2344 VDD_IO_HIGH, 0);
2345 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302346 if (ret)
2347 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2348 else
2349 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302350
2351 pwr_state = REQ_BUS_ON;
2352 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302353 }
2354 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2355 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302356 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302357 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302358 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2359 VDD_IO_LOW, 0);
2360 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302361 if (ret)
2362 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2363 else
2364 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302365
2366 pwr_state = REQ_BUS_OFF;
2367 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302368 }
2369 /* Handle IO LOW/HIGH */
2370 if (irq_status & CORE_PWRCTL_IO_LOW) {
2371 /* Switch voltage Low */
2372 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2373 if (ret)
2374 irq_ack |= CORE_PWRCTL_IO_FAIL;
2375 else
2376 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302377
2378 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302379 }
2380 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2381 /* Switch voltage High */
2382 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2383 if (ret)
2384 irq_ack |= CORE_PWRCTL_IO_FAIL;
2385 else
2386 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302387
2388 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302389 }
2390
2391 /* ACK status to the core */
2392 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2393 /*
2394 * SDHC has core_mem and hc_mem device memory and these memory
2395 * addresses do not fall within 1KB region. Hence, any update to
2396 * core_mem address space would require an mb() to ensure this gets
2397 * completed before its next update to registers within hc_mem.
2398 */
2399 mb();
2400
Krishna Konda46fd1432014-10-30 21:13:27 -07002401 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002402 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2403 ~CORE_IO_PAD_PWR_SWITCH),
2404 host->ioaddr + CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002405 else if ((io_level & REQ_IO_LOW) ||
2406 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002407 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2408 CORE_IO_PAD_PWR_SWITCH),
2409 host->ioaddr + CORE_VENDOR_SPEC);
2410 mb();
2411
Asutosh Das0ef24812012-12-18 16:14:02 +05302412 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2413 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302414 spin_lock_irqsave(&host->lock, flags);
2415 if (pwr_state)
2416 msm_host->curr_pwr_state = pwr_state;
2417 if (io_level)
2418 msm_host->curr_io_level = io_level;
2419 complete(&msm_host->pwr_irq_completion);
2420 spin_unlock_irqrestore(&host->lock, flags);
2421
Asutosh Das0ef24812012-12-18 16:14:02 +05302422 return IRQ_HANDLED;
2423}
2424
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302425static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302426show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2427{
2428 struct sdhci_host *host = dev_get_drvdata(dev);
2429 int poll;
2430 unsigned long flags;
2431
2432 spin_lock_irqsave(&host->lock, flags);
2433 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2434 spin_unlock_irqrestore(&host->lock, flags);
2435
2436 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2437}
2438
2439static ssize_t
2440store_polling(struct device *dev, struct device_attribute *attr,
2441 const char *buf, size_t count)
2442{
2443 struct sdhci_host *host = dev_get_drvdata(dev);
2444 int value;
2445 unsigned long flags;
2446
2447 if (!kstrtou32(buf, 0, &value)) {
2448 spin_lock_irqsave(&host->lock, flags);
2449 if (value) {
2450 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2451 mmc_detect_change(host->mmc, 0);
2452 } else {
2453 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2454 }
2455 spin_unlock_irqrestore(&host->lock, flags);
2456 }
2457 return count;
2458}
2459
/* sysfs read: report whether the forced maximum bus-bandwidth vote is on */
static ssize_t
show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			msm_host->msm_bus_vote.is_max_bw_needed);
}
2471
2472static ssize_t
2473store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2474 const char *buf, size_t count)
2475{
2476 struct sdhci_host *host = dev_get_drvdata(dev);
2477 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2478 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2479 uint32_t value;
2480 unsigned long flags;
2481
2482 if (!kstrtou32(buf, 0, &value)) {
2483 spin_lock_irqsave(&host->lock, flags);
2484 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2485 spin_unlock_irqrestore(&host->lock, flags);
2486 }
2487 return count;
2488}
2489
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302490static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302491{
2492 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2493 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302494 unsigned long flags;
2495 bool done = false;
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302496 u32 io_sig_sts;
Asutosh Das0ef24812012-12-18 16:14:02 +05302497
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302498 spin_lock_irqsave(&host->lock, flags);
2499 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2500 mmc_hostname(host->mmc), __func__, req_type,
2501 msm_host->curr_pwr_state, msm_host->curr_io_level);
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302502 io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
2503 /*
2504 * The IRQ for request type IO High/Low will be generated when -
2505 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2506 * 2. If 1 is true and when there is a state change in 1.8V enable
2507 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2508 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2509 * layer tries to set it to 3.3V before card detection happens, the
2510 * IRQ doesn't get triggered as there is no state change in this bit.
2511 * The driver already handles this case by changing the IO voltage
2512 * level to high as part of controller power up sequence. Hence, check
2513 * for host->pwr to handle a case where IO voltage high request is
2514 * issued even before controller power up.
2515 */
2516 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2517 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2518 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2519 pr_debug("%s: do not wait for power IRQ that never comes\n",
2520 mmc_hostname(host->mmc));
2521 spin_unlock_irqrestore(&host->lock, flags);
2522 return;
2523 }
2524 }
2525
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302526 if ((req_type & msm_host->curr_pwr_state) ||
2527 (req_type & msm_host->curr_io_level))
2528 done = true;
2529 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302530
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302531 /*
2532 * This is needed here to hanlde a case where IRQ gets
2533 * triggered even before this function is called so that
2534 * x->done counter of completion gets reset. Otherwise,
2535 * next call to wait_for_completion returns immediately
2536 * without actually waiting for the IRQ to be handled.
2537 */
2538 if (done)
2539 init_completion(&msm_host->pwr_irq_completion);
2540 else
2541 wait_for_completion(&msm_host->pwr_irq_completion);
2542
2543 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2544 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302545}
2546
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002547static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2548{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302549 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2550
2551 if (enable) {
2552 config |= CORE_CDR_EN;
2553 config &= ~CORE_CDR_EXT_EN;
2554 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2555 } else {
2556 config &= ~CORE_CDR_EN;
2557 config |= CORE_CDR_EXT_EN;
2558 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2559 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002560}
2561
Asutosh Das648f9d12013-01-10 21:11:04 +05302562static unsigned int sdhci_msm_max_segs(void)
2563{
2564 return SDHCI_MSM_MAX_SEGMENTS;
2565}
2566
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302567static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302568{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302569 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2570 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302571
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302572 return msm_host->pdata->sup_clk_table[0];
2573}
2574
/*
 * Highest supported SDCC clock rate: last entry of the DT
 * "qcom,clk-rates" table (assumed sorted ascending).
 */
static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int max_clk_index = msm_host->pdata->sup_clk_cnt;

	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
}
2583
2584static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2585 u32 req_clk)
2586{
2587 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2588 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2589 unsigned int sel_clk = -1;
2590 unsigned char cnt;
2591
2592 if (req_clk < sdhci_msm_get_min_clock(host)) {
2593 sel_clk = sdhci_msm_get_min_clock(host);
2594 return sel_clk;
2595 }
2596
2597 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2598 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2599 break;
2600 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2601 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2602 break;
2603 } else {
2604 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2605 }
2606 }
2607 return sel_clk;
2608}
2609
/*
 * sdhci_msm_enable_controller_clock - turn on the controller interface clocks.
 *
 * Votes for bus bandwidth first, then enables the peripheral (iface) clock
 * followed by the core clock, and finally records the on state in
 * msm_host->controller_clock. Idempotent: returns 0 immediately if the
 * controller clock is already marked enabled.
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable(); on
 * failure all partially-acquired resources are released via the goto ladder.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	/* Already on - nothing to do */
	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before enabling any clocks */
	sdhci_msm_bus_voting(host, 1);

	/* pclk is optional on some platforms */
	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
				mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
			mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2651
2652
2653
/*
 * sdhci_msm_prepare_clocks - enable or disable the full set of host clocks.
 *
 * @enable=true:  votes for the bus, enables controller clocks, then the
 *                optional bus/ff/sleep clocks, in that order.
 * @enable=false: gates SDCLK, disables the clocks in reverse order and
 *                removes the bus vote - unless a 1.8V signal switch is in
 *                progress (card_clock_off), in which case clocks must stay on.
 *
 * msm_host->clks_on tracks the aggregate state; calls are no-ops when the
 * requested state already matches. Returns 0 on success or a negative errno;
 * the goto ladder unwinds partial enables on failure.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* bus_clk, ff_clk and sleep_clk are all optional */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are observed before proceeding */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* Gate the card clock before touching the source clocks */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2748
/*
 * sdhci_msm_set_clock - set the card clock rate and the matching controller
 * configuration.
 *
 * clock == 0 disables power-save clock gating and turns all host clocks off.
 * Otherwise: enables clocks, updates the CLK_PWRSAVE gating bit according to
 * whether the card may be clock-gated, doubles the internal clock for DDR
 * timings, selects the HS400 vs. default MCLK source and HC_SELECT_IN mode,
 * and finally programs the supported rate via clk_set_rate().
 * Always finishes by calling the standard sdhci_set_clock().
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	/* Re-enable power-save gating once past init speed, if gating is allowed */
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* clear CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* Ensure all register writes above have completed */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2909
/*
 * sdhci_msm_set_uhs_signaling - program the bus speed mode for @uhs timing
 * into HOST_CONTROL2 and disable the DLL at low clock rates.
 *
 * HS200/HS400 have no UHS encoding of their own, so SDR104 is used for them
 * (the vendor HC_SELECT_IN path handles the real HS400 switch elsewhere).
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
			(uhs == MMC_TIMING_MMC_HS200) ||
			(uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_RST),
				host->ioaddr + CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_PDN),
				host->ioaddr + CORE_DLL_CONFIG);
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
2972
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002973#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002974#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302975static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002976{
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302977 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2978 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002979 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302980 struct cmdq_host *cq_host = host->cq_host;
2981
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002982 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2983 u16 minor = version & CORE_VERSION_TARGET_MASK;
2984 /* registers offset changed starting from 4.2.0 */
2985 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2986
2987 pr_err("---- Debug RAM dump ----\n");
2988 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2989 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2990 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
2991
2992 while (i < 16) {
2993 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
2994 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
2995 i++;
2996 }
2997 pr_err("-------------------------\n");
2998}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05302999
/*
 * sdhci_msm_dump_vendor_regs - dump vendor-specific debug state for
 * post-mortem analysis: CMDQ debug RAM (if present), MCI/DLL/vendor
 * registers, and the internal test-bus lines sampled via
 * CORE_TESTBUS_CONFIG / CORE_SDCC_DEBUG_REG.
 *
 * NOTE(review): the loops cover 7 * 8 = 56 test-bus selections while
 * MAX_TEST_BUS is 60, so debug_reg[56..59] are always printed as zero -
 * confirm whether tbsel2 should iterate to 8.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
					CORE_SDCC_DEBUG_REG);
		}
	}
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3054
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303055/*
3056 * sdhci_msm_enhanced_strobe_mask :-
3057 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3058 * SW should write 3 to
3059 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3060 * The default reset value of this register is 2.
3061 */
3062static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3063{
3064 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3065 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3066
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303067 if (!msm_host->enhanced_strobe ||
3068 !mmc_card_strobe(msm_host->mmc->card)) {
3069 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303070 mmc_hostname(host->mmc));
3071 return;
3072 }
3073
3074 if (set) {
3075 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3076 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3077 host->ioaddr + CORE_VENDOR_SPEC3);
3078 } else {
3079 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3080 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3081 host->ioaddr + CORE_VENDOR_SPEC3);
3082 }
3083}
3084
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003085static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3086{
3087 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3088 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3089
3090 if (set) {
3091 writel_relaxed(CORE_TESTBUS_ENA,
3092 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3093 } else {
3094 u32 value;
3095
3096 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3097 value &= ~CORE_TESTBUS_ENA;
3098 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3099 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303100}
3101
/*
 * sdhci_msm_reset_workaround - request a vendor soft reset and, if it does
 * not complete, fall back to disabling the wait-for-AXI-idle behaviour.
 *
 * @enable=1: set HC_SW_RST_REQ and poll (up to ~100ms, 10us steps) for the
 * hardware to clear it. On timeout, set HC_SW_RST_WAIT_IDLE_DIS so the
 * controller resets without waiting for pending AXI transfers, and stamp
 * host->reset_wa_t with the time the workaround was applied.
 * @enable=0: clear the wait-idle-disable workaround bit.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		/* 10000 iterations x 10us = ~100ms poll budget */
		timeout = 10000;
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
						HC_SW_RST_WAIT_IDLE_DIS,
						host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3143
Gilad Broner44445992015-09-29 16:05:39 +03003144static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3145{
3146 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303147 container_of(work, struct sdhci_msm_pm_qos_irq,
3148 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003149
3150 if (atomic_read(&pm_qos_irq->counter))
3151 return;
3152
3153 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3154 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3155}
3156
3157void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3158{
3159 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3160 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3161 struct sdhci_msm_pm_qos_latency *latency =
3162 &msm_host->pdata->pm_qos_data.irq_latency;
3163 int counter;
3164
3165 if (!msm_host->pm_qos_irq.enabled)
3166 return;
3167
3168 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3169 /* Make sure to update the voting in case power policy has changed */
3170 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3171 && counter > 1)
3172 return;
3173
Asutosh Das36c2e922015-12-01 12:19:58 +05303174 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003175 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3176 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3177 msm_host->pm_qos_irq.latency);
3178}
3179
/*
 * sdhci_msm_pm_qos_irq_unvote - drop one IRQ PM QoS vote.
 *
 * When the vote count reaches zero, either schedule a delayed unvote
 * (@async) or reset the QoS request to the default latency immediately.
 * Unbalanced calls (counter already 0) trigger a WARN and are ignored.
 *
 * NOTE(review): the read-then-decrement of the counter is not a single
 * atomic operation, so two concurrent unvoters could both pass the
 * non-zero check - confirm callers serialize this path.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	} else {
		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
		return;
	}

	/* Other votes still outstanding */
	if (counter)
		return;

	if (async) {
		schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3209
Gilad Broner68c54562015-09-20 11:59:46 +03003210static ssize_t
3211sdhci_msm_pm_qos_irq_show(struct device *dev,
3212 struct device_attribute *attr, char *buf)
3213{
3214 struct sdhci_host *host = dev_get_drvdata(dev);
3215 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3216 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3217 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3218
3219 return snprintf(buf, PAGE_SIZE,
3220 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3221 irq->enabled, atomic_read(&irq->counter), irq->latency);
3222}
3223
3224static ssize_t
3225sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3226 struct device_attribute *attr, char *buf)
3227{
3228 struct sdhci_host *host = dev_get_drvdata(dev);
3229 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3230 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3231
3232 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3233}
3234
3235static ssize_t
3236sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3237 struct device_attribute *attr, const char *buf, size_t count)
3238{
3239 struct sdhci_host *host = dev_get_drvdata(dev);
3240 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3241 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3242 uint32_t value;
3243 bool enable;
3244 int ret;
3245
3246 ret = kstrtou32(buf, 0, &value);
3247 if (ret)
3248 goto out;
3249 enable = !!value;
3250
3251 if (enable == msm_host->pm_qos_irq.enabled)
3252 goto out;
3253
3254 msm_host->pm_qos_irq.enabled = enable;
3255 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303256 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003257 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3258 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3259 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3260 msm_host->pm_qos_irq.latency);
3261 }
3262
3263out:
3264 return count;
3265}
3266
#ifdef CONFIG_SMP
/* Bind the PM QoS request to this host's IRQ line (SMP only). */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
/* UP build: IRQ affinity for PM QoS is meaningless, so do nothing. */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host) { }
#endif
3277
/*
 * sdhci_msm_pm_qos_irq_init - one-time setup of IRQ PM QoS voting.
 *
 * Configures the QoS request type/affinity from platform data, initializes
 * the delayed unvote work, registers the request with an initial
 * performance-mode latency, and creates the "pm_qos_irq_enable" and
 * "pm_qos_irq_status" sysfs attributes. Returns early when platform data
 * declares no IRQ QoS or when already initialized (called per partition).
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	/* Either pin the request to the IRQ line or to a CPU mask */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
		&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3337
3338static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3339 struct device_attribute *attr, char *buf)
3340{
3341 struct sdhci_host *host = dev_get_drvdata(dev);
3342 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3343 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3344 struct sdhci_msm_pm_qos_group *group;
3345 int i;
3346 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3347 int offset = 0;
3348
3349 for (i = 0; i < nr_groups; i++) {
3350 group = &msm_host->pm_qos[i];
3351 offset += snprintf(&buf[offset], PAGE_SIZE,
3352 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3353 i, group->req.cpus_affine.bits[0],
3354 msm_host->pm_qos_group_enable,
3355 atomic_read(&group->counter),
3356 group->latency);
3357 }
3358
3359 return offset;
3360}
3361
3362static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3363 struct device_attribute *attr, char *buf)
3364{
3365 struct sdhci_host *host = dev_get_drvdata(dev);
3366 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3367 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3368
3369 return snprintf(buf, PAGE_SIZE, "%s\n",
3370 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3371}
3372
/*
 * sysfs store for "pm_qos_cpu_groups_enable": parses an unsigned integer;
 * non-zero enables per-CPU-group PM QoS voting, zero disables it.
 *
 * Note: 'count' is returned even when parsing fails or the state is
 * unchanged, so the write always appears to succeed to userspace.
 */
static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	uint32_t value;
	bool enable;
	int ret;
	int i;

	ret = kstrtou32(buf, 0, &value);
	if (ret)
		goto out;
	enable = !!value;

	/* no state change requested - nothing to do */
	if (enable == msm_host->pm_qos_group_enable)
		goto out;

	msm_host->pm_qos_group_enable = enable;
	if (!enable) {
		/*
		 * On disable: flush any pending deferred unvote work first,
		 * then zero the vote counter and drop each group's latency
		 * request back to the PM QoS default.
		 */
		for (i = 0; i < nr_groups; i++) {
			cancel_delayed_work_sync(
				&msm_host->pm_qos[i].unvote_work);
			atomic_set(&msm_host->pm_qos[i].counter, 0);
			msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
			pm_qos_update_request(&msm_host->pm_qos[i].req,
				msm_host->pm_qos[i].latency);
		}
	}

out:
	return count;
}
3408
3409static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3410{
3411 int i;
3412 struct sdhci_msm_cpu_group_map *map =
3413 &msm_host->pdata->pm_qos_data.cpu_group_map;
3414
3415 if (cpu < 0)
3416 goto not_found;
3417
3418 for (i = 0; i < map->nr_groups; i++)
3419 if (cpumask_test_cpu(cpu, &map->mask[i]))
3420 return i;
3421
3422not_found:
3423 return -EINVAL;
3424}
3425
/*
 * Take a PM QoS vote for the CPU group that 'cpu' belongs to, using the
 * latency table entry for the host's current power policy.
 *
 * The vote counter is incremented unconditionally (once the group is
 * resolved); the actual pm_qos request is only refreshed when this is the
 * first vote or the desired latency changed (e.g. the power policy moved
 * between performance and power-save modes).
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* a deferred unvote may still be queued; stop it before re-voting */
	cancel_delayed_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3451
3452static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3453{
3454 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303455 container_of(work, struct sdhci_msm_pm_qos_group,
3456 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003457
3458 if (atomic_read(&group->counter))
3459 return;
3460
3461 group->latency = PM_QOS_DEFAULT_VALUE;
3462 pm_qos_update_request(&group->req, group->latency);
3463}
3464
Gilad Broner07d92eb2015-09-29 16:57:21 +03003465bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003466{
3467 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3468 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3469 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3470
3471 if (!msm_host->pm_qos_group_enable || group < 0 ||
3472 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003473 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003474
3475 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303476 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3477 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003478 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003479 }
3480
3481 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3482 pm_qos_update_request(&msm_host->pm_qos[group].req,
3483 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003484 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003485}
3486
3487void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3488 struct sdhci_msm_pm_qos_latency *latency)
3489{
3490 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3491 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3492 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3493 struct sdhci_msm_pm_qos_group *group;
3494 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003495 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003496
3497 if (msm_host->pm_qos_group_enable)
3498 return;
3499
3500 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3501 GFP_KERNEL);
3502 if (!msm_host->pm_qos)
3503 return;
3504
3505 for (i = 0; i < nr_groups; i++) {
3506 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303507 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003508 sdhci_msm_pm_qos_cpu_unvote_work);
3509 atomic_set(&group->counter, 0);
3510 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3511 cpumask_copy(&group->req.cpus_affine,
3512 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3513 /* For initialization phase, set the performance mode latency */
3514 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3515 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3516 group->latency);
3517 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3518 __func__, i,
3519 group->req.cpus_affine.bits[0],
3520 group->latency,
3521 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3522 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003523 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003524 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003525
3526 /* sysfs */
3527 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3528 msm_host->pm_qos_group_status_attr.store = NULL;
3529 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3530 msm_host->pm_qos_group_status_attr.attr.name =
3531 "pm_qos_cpu_groups_status";
3532 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3533 ret = device_create_file(&msm_host->pdev->dev,
3534 &msm_host->pm_qos_group_status_attr);
3535 if (ret)
3536 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3537 __func__, ret);
3538 msm_host->pm_qos_group_enable_attr.show =
3539 sdhci_msm_pm_qos_group_enable_show;
3540 msm_host->pm_qos_group_enable_attr.store =
3541 sdhci_msm_pm_qos_group_enable_store;
3542 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3543 msm_host->pm_qos_group_enable_attr.attr.name =
3544 "pm_qos_cpu_groups_enable";
3545 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3546 ret = device_create_file(&msm_host->pdev->dev,
3547 &msm_host->pm_qos_group_enable_attr);
3548 if (ret)
3549 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3550 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003551}
3552
Gilad Broner07d92eb2015-09-29 16:57:21 +03003553static void sdhci_msm_pre_req(struct sdhci_host *host,
3554 struct mmc_request *mmc_req)
3555{
3556 int cpu;
3557 int group;
3558 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3559 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3560 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3561 msm_host->pm_qos_prev_cpu);
3562
3563 sdhci_msm_pm_qos_irq_vote(host);
3564
3565 cpu = get_cpu();
3566 put_cpu();
3567 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3568 if (group < 0)
3569 return;
3570
3571 if (group != prev_group && prev_group >= 0) {
3572 sdhci_msm_pm_qos_cpu_unvote(host,
3573 msm_host->pm_qos_prev_cpu, false);
3574 prev_group = -1; /* make sure to vote for new group */
3575 }
3576
3577 if (prev_group < 0) {
3578 sdhci_msm_pm_qos_cpu_vote(host,
3579 msm_host->pdata->pm_qos_data.latency, cpu);
3580 msm_host->pm_qos_prev_cpu = cpu;
3581 }
3582}
3583
3584static void sdhci_msm_post_req(struct sdhci_host *host,
3585 struct mmc_request *mmc_req)
3586{
3587 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3588 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3589
3590 sdhci_msm_pm_qos_irq_unvote(host, false);
3591
3592 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3593 msm_host->pm_qos_prev_cpu = -1;
3594}
3595
3596static void sdhci_msm_init(struct sdhci_host *host)
3597{
3598 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3599 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3600
3601 sdhci_msm_pm_qos_irq_init(host);
3602
3603 if (msm_host->pdata->pm_qos_data.legacy_valid)
3604 sdhci_msm_pm_qos_cpu_init(host,
3605 msm_host->pdata->pm_qos_data.latency);
3606}
3607
Asutosh Das0ef24812012-12-18 16:14:02 +05303608static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala14613432013-03-21 11:13:25 +05303609 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05303610 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003611 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05303612 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003613 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05303614 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303615 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303616 .get_min_clock = sdhci_msm_get_min_clock,
3617 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303618 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303619 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303620 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08003621 .set_bus_width = sdhci_set_bus_width,
Venkat Gopalakrishnan411df072015-01-09 11:09:44 -08003622 .reset = sdhci_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003623 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303624 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05303625 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03003626 .init = sdhci_msm_init,
3627 .pre_req = sdhci_msm_pre_req,
3628 .post_req = sdhci_msm_post_req,
Asutosh Das0ef24812012-12-18 16:14:02 +05303629};
3630
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303631static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
3632 struct sdhci_host *host)
3633{
Krishna Konda46fd1432014-10-30 21:13:27 -07003634 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303635 u16 minor;
3636 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303637 u32 val;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303638
3639 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
3640 major = (version & CORE_VERSION_MAJOR_MASK) >>
3641 CORE_VERSION_MAJOR_SHIFT;
3642 minor = version & CORE_VERSION_TARGET_MASK;
3643
Krishna Konda46fd1432014-10-30 21:13:27 -07003644 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
3645
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303646 /*
3647 * Starting with SDCC 5 controller (core major version = 1)
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003648 * controller won't advertise 3.0v, 1.8v and 8-bit features
3649 * except for some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303650 */
3651 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003652 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003653 /*
3654 * Enable 1.8V support capability on controllers that
3655 * support dual voltage
3656 */
3657 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07003658 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
3659 caps |= CORE_3_0V_SUPPORT;
3660 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003661 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05303662 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
3663 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303664 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003665
3666 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303667 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
3668 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
3669 */
3670 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303671 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303672 val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
3673 writel_relaxed((val | CORE_ONE_MID_EN),
3674 host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
3675 }
3676 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003677 * SDCC 5 controller with major version 1, minor version 0x34 and later
3678 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
3679 */
3680 if ((major == 1) && (minor < 0x34))
3681 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03003682
3683 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003684 * SDCC 5 controller with major version 1, minor version 0x42 and later
3685 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05303686 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003687 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05303688 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003689 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05303690 msm_host->enhanced_strobe = true;
3691 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003692
3693 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03003694 * SDCC 5 controller with major version 1 and minor version 0x42,
3695 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
3696 * gating cannot guarantee MCLK timing requirement i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05303697 * when MCLK is gated OFF, it is not gated for less than 0.5us
3698 * and MCLK must be switched on for at-least 1us before DATA
3699 * starts coming.
3700 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03003701 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
3702 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05303703 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07003704
Pavan Anamula5a256df2015-10-16 14:38:28 +05303705 /* Fake 3.0V support for SDIO devices which requires such voltage */
3706 if (msm_host->pdata->core_3_0v_support) {
3707 caps |= CORE_3_0V_SUPPORT;
3708 writel_relaxed(
3709 (readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
3710 caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
3711 }
3712
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07003713 if ((major == 1) && (minor >= 0x49))
3714 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05303715 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03003716 * Mask 64-bit support for controller with 32-bit address bus so that
3717 * smaller descriptor size will be used and improve memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03003718 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08003719 if (!msm_host->pdata->largeaddressbus)
3720 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
3721
Gilad Broner2a10ca02014-10-02 17:20:35 +03003722 writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07003723 /* keep track of the value in SDHCI_CAPABILITIES */
3724 msm_host->caps_0 = caps;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303725}
3726
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07003727#ifdef CONFIG_MMC_CQ_HCI
3728static void sdhci_msm_cmdq_init(struct sdhci_host *host,
3729 struct platform_device *pdev)
3730{
3731 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3732 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3733
3734 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003735 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07003736 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
3737 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003738 host->cq_host = NULL;
3739 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07003740 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003741 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07003742}
3743#else
3744static void sdhci_msm_cmdq_init(struct sdhci_host *host,
3745 struct platform_device *pdev)
3746{
3747
3748}
3749#endif
3750
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003751static bool sdhci_msm_is_bootdevice(struct device *dev)
3752{
3753 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3754 strlen(saved_command_line))) {
3755 char search_string[50];
3756
3757 snprintf(search_string, ARRAY_SIZE(search_string),
3758 "androidboot.bootdevice=%s", dev_name(dev));
3759 if (strnstr(saved_command_line, search_string,
3760 strlen(saved_command_line)))
3761 return true;
3762 else
3763 return false;
3764 }
3765
3766 /*
3767 * "androidboot.bootdevice=" argument is not present then
3768 * return true as we don't know the boot device anyways.
3769 */
3770 return true;
3771}
3772
Asutosh Das0ef24812012-12-18 16:14:02 +05303773static int sdhci_msm_probe(struct platform_device *pdev)
3774{
3775 struct sdhci_host *host;
3776 struct sdhci_pltfm_host *pltfm_host;
3777 struct sdhci_msm_host *msm_host;
3778 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003779 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003780 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003781 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05303782 struct resource *tlmm_memres = NULL;
3783 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05303784 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05303785
3786 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3787 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3788 GFP_KERNEL);
3789 if (!msm_host) {
3790 ret = -ENOMEM;
3791 goto out;
3792 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303793
3794 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3795 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3796 if (IS_ERR(host)) {
3797 ret = PTR_ERR(host);
3798 goto out;
3799 }
3800
3801 pltfm_host = sdhci_priv(host);
3802 pltfm_host->priv = msm_host;
3803 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303804 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303805
3806 /* Extract platform data */
3807 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003808 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3809 if (ret < 0) {
3810 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3811 ret);
3812 goto pltfm_free;
3813 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003814
3815 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003816 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3817 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003818 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003819 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003820
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003821 if (disable_slots & (1 << (ret - 1))) {
3822 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3823 ret);
3824 ret = -ENODEV;
3825 goto pltfm_free;
3826 }
3827
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003828 if (ret <= 2) {
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003829 sdhci_slot[ret-1] = msm_host;
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003830 host->slot_no = ret;
3831 }
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003832
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003833 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3834 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303835 if (!msm_host->pdata) {
3836 dev_err(&pdev->dev, "DT parsing error\n");
3837 goto pltfm_free;
3838 }
3839 } else {
3840 dev_err(&pdev->dev, "No device tree node\n");
3841 goto pltfm_free;
3842 }
3843
3844 /* Setup Clocks */
3845
3846 /* Setup SDCC bus voter clock. */
3847 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3848 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3849 /* Vote for max. clk rate for max. performance */
3850 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3851 if (ret)
3852 goto pltfm_free;
3853 ret = clk_prepare_enable(msm_host->bus_clk);
3854 if (ret)
3855 goto pltfm_free;
3856 }
3857
3858 /* Setup main peripheral bus clock */
3859 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3860 if (!IS_ERR(msm_host->pclk)) {
3861 ret = clk_prepare_enable(msm_host->pclk);
3862 if (ret)
3863 goto bus_clk_disable;
3864 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303865 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303866
3867 /* Setup SDC MMC clock */
3868 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3869 if (IS_ERR(msm_host->clk)) {
3870 ret = PTR_ERR(msm_host->clk);
3871 goto pclk_disable;
3872 }
3873
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303874 /* Set to the minimum supported clock frequency */
3875 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3876 if (ret) {
3877 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303878 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303879 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303880 ret = clk_prepare_enable(msm_host->clk);
3881 if (ret)
3882 goto pclk_disable;
3883
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303884 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303885 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303886
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003887 /* Setup CDC calibration fixed feedback clock */
3888 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3889 if (!IS_ERR(msm_host->ff_clk)) {
3890 ret = clk_prepare_enable(msm_host->ff_clk);
3891 if (ret)
3892 goto clk_disable;
3893 }
3894
3895 /* Setup CDC calibration sleep clock */
3896 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3897 if (!IS_ERR(msm_host->sleep_clk)) {
3898 ret = clk_prepare_enable(msm_host->sleep_clk);
3899 if (ret)
3900 goto ff_clk_disable;
3901 }
3902
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003903 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3904
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303905 ret = sdhci_msm_bus_register(msm_host, pdev);
3906 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003907 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303908
3909 if (msm_host->msm_bus_vote.client_handle)
3910 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3911 sdhci_msm_bus_work);
3912 sdhci_msm_bus_voting(host, 1);
3913
Asutosh Das0ef24812012-12-18 16:14:02 +05303914 /* Setup regulators */
3915 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3916 if (ret) {
3917 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303918 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303919 }
3920
3921 /* Reset the core and Enable SDHC mode */
3922 core_memres = platform_get_resource_byname(pdev,
3923 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303924 if (!core_memres) {
3925 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3926 goto vreg_deinit;
3927 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303928 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3929 resource_size(core_memres));
3930
3931 if (!msm_host->core_mem) {
3932 dev_err(&pdev->dev, "Failed to remap registers\n");
3933 ret = -ENOMEM;
3934 goto vreg_deinit;
3935 }
3936
Sahitya Tummala079ed852015-10-29 20:18:45 +05303937 tlmm_memres = platform_get_resource_byname(pdev,
3938 IORESOURCE_MEM, "tlmm_mem");
3939 if (tlmm_memres) {
3940 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
3941 resource_size(tlmm_memres));
3942
3943 if (!tlmm_mem) {
3944 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
3945 ret = -ENOMEM;
3946 goto vreg_deinit;
3947 }
3948 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
3949 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
3950 &tlmm_memres->start, readl_relaxed(tlmm_mem));
3951 }
3952
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303953 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003954 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303955 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003956 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3957 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303958
Asutosh Das0ef24812012-12-18 16:14:02 +05303959 /* Set HC_MODE_EN bit in HC_MODE register */
3960 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3961
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003962 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3963 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3964 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3965
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303966 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003967
3968 /*
3969 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3970 * be used as required later on.
3971 */
3972 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3973 CORE_IO_PAD_PWR_SWITCH_EN),
3974 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303975 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303976 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3977 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3978 * interrupt in GIC (by registering the interrupt handler), we need to
3979 * ensure that any pending power irq interrupt status is acknowledged
3980 * otherwise power irq interrupt handler would be fired prematurely.
3981 */
3982 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3983 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3984 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3985 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3986 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3987 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3988 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3989 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003990
Subhash Jadavani28137342013-05-14 17:46:43 +05303991 /*
3992 * Ensure that above writes are propogated before interrupt enablement
3993 * in GIC.
3994 */
3995 mb();
3996
3997 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303998 * Following are the deviations from SDHC spec v3.0 -
3999 * 1. Card detection is handled using separate GPIO.
4000 * 2. Bus power control is handled by interacting with PMIC.
4001 */
4002 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4003 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304004 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004005 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304006 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304007 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304008 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304009 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05304010
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304011 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4012 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4013
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004014 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004015 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4016 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4017 SDHCI_VENDOR_VER_SHIFT));
4018 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4019 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4020 /*
4021 * Add 40us delay in interrupt handler when
4022 * operating at initialization frequency(400KHz).
4023 */
4024 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4025 /*
4026 * Set Software Reset for DAT line in Software
4027 * Reset Register (Bit 2).
4028 */
4029 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4030 }
4031
Asutosh Das214b9662013-06-13 14:27:42 +05304032 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4033
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004034 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004035 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4036 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304037 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004038 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304039 goto vreg_deinit;
4040 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004041 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304042 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004043 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304044 if (ret) {
4045 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004046 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304047 goto vreg_deinit;
4048 }
4049
4050 /* Enable pwr irq interrupts */
4051 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
4052
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304053#ifdef CONFIG_MMC_CLKGATE
4054 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4055 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4056#endif
4057
Asutosh Das0ef24812012-12-18 16:14:02 +05304058 /* Set host capabilities */
4059 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4060 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004061 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304062 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304063 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004064 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
4065 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004066 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004067 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304068 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004069 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004070 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304071 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304072
4073 if (msm_host->pdata->nonremovable)
4074 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4075
Guoping Yuf7c91332014-08-20 16:56:18 +08004076 if (msm_host->pdata->nonhotplug)
4077 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4078
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304079 init_completion(&msm_host->pwr_irq_completion);
4080
Sahitya Tummala581df132013-03-12 14:57:46 +05304081 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304082 /*
4083 * Set up the card detect GPIO in active configuration before
4084 * configuring it as an IRQ. Otherwise, it can be in some
4085 * weird/inconsistent state resulting in flood of interrupts.
4086 */
4087 sdhci_msm_setup_pins(msm_host->pdata, true);
4088
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304089 /*
4090 * This delay is needed for stabilizing the card detect GPIO
4091 * line after changing the pull configs.
4092 */
4093 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304094 ret = mmc_gpio_request_cd(msm_host->mmc,
4095 msm_host->pdata->status_gpio, 0);
4096 if (ret) {
4097 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4098 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304099 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304100 }
4101 }
4102
Krishna Konda7feab352013-09-17 23:55:40 -07004103 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4104 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4105 host->dma_mask = DMA_BIT_MASK(64);
4106 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304107 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004108 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304109 host->dma_mask = DMA_BIT_MASK(32);
4110 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304111 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304112 } else {
4113 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4114 }
4115
Ritesh Harjani42876f42015-11-17 17:46:51 +05304116 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4117 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304118 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304119 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4120 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304121 msm_host->is_sdiowakeup_enabled = true;
4122 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4123 sdhci_msm_sdiowakeup_irq,
4124 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4125 "sdhci-msm sdiowakeup", host);
4126 if (ret) {
4127 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4128 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4129 msm_host->pdata->sdiowakeup_irq = -1;
4130 msm_host->is_sdiowakeup_enabled = false;
4131 goto vreg_deinit;
4132 } else {
4133 spin_lock_irqsave(&host->lock, flags);
4134 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304135 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304136 spin_unlock_irqrestore(&host->lock, flags);
4137 }
4138 }
4139
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004140 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304141 ret = sdhci_add_host(host);
4142 if (ret) {
4143 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304144 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304145 }
4146
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004147 pm_runtime_set_active(&pdev->dev);
4148 pm_runtime_enable(&pdev->dev);
4149 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4150 pm_runtime_use_autosuspend(&pdev->dev);
4151
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304152 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4153 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4154 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4155 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4156 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4157 ret = device_create_file(&pdev->dev,
4158 &msm_host->msm_bus_vote.max_bus_bw);
4159 if (ret)
4160 goto remove_host;
4161
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304162 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4163 msm_host->polling.show = show_polling;
4164 msm_host->polling.store = store_polling;
4165 sysfs_attr_init(&msm_host->polling.attr);
4166 msm_host->polling.attr.name = "polling";
4167 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4168 ret = device_create_file(&pdev->dev, &msm_host->polling);
4169 if (ret)
4170 goto remove_max_bus_bw_file;
4171 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304172
4173 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4174 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4175 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4176 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4177 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4178 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4179 if (ret) {
4180 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4181 mmc_hostname(host->mmc), __func__, ret);
4182 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4183 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304184 /* Successful initialization */
4185 goto out;
4186
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304187remove_max_bus_bw_file:
4188 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304189remove_host:
4190 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004191 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304192 sdhci_remove_host(host, dead);
4193vreg_deinit:
4194 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304195bus_unregister:
4196 if (msm_host->msm_bus_vote.client_handle)
4197 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4198 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004199sleep_clk_disable:
4200 if (!IS_ERR(msm_host->sleep_clk))
4201 clk_disable_unprepare(msm_host->sleep_clk);
4202ff_clk_disable:
4203 if (!IS_ERR(msm_host->ff_clk))
4204 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304205clk_disable:
4206 if (!IS_ERR(msm_host->clk))
4207 clk_disable_unprepare(msm_host->clk);
4208pclk_disable:
4209 if (!IS_ERR(msm_host->pclk))
4210 clk_disable_unprepare(msm_host->pclk);
4211bus_clk_disable:
4212 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4213 clk_disable_unprepare(msm_host->bus_clk);
4214pltfm_free:
4215 sdhci_pltfm_free(pdev);
4216out:
4217 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4218 return ret;
4219}
4220
4221static int sdhci_msm_remove(struct platform_device *pdev)
4222{
4223 struct sdhci_host *host = platform_get_drvdata(pdev);
4224 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4225 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4226 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4227 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4228 0xffffffff);
4229
4230 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304231 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4232 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304233 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004234 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304235 sdhci_remove_host(host, dead);
4236 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304237
Asutosh Das0ef24812012-12-18 16:14:02 +05304238 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304239
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304240 sdhci_msm_setup_pins(pdata, true);
4241 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304242
4243 if (msm_host->msm_bus_vote.client_handle) {
4244 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4245 sdhci_msm_bus_unregister(msm_host);
4246 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304247 return 0;
4248}
4249
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004250#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304251static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4252{
4253 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4254 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4255 unsigned long flags;
4256 int ret = 0;
4257
4258 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4259 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4260 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304261 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304262 return 1;
4263 }
4264
4265 spin_lock_irqsave(&host->lock, flags);
4266 if (enable) {
4267 /* configure DAT1 gpio if applicable */
4268 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304269 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304270 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4271 if (!ret)
4272 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4273 goto out;
4274 } else {
4275 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4276 mmc_hostname(host->mmc), enable);
4277 }
4278 } else {
4279 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4280 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4281 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304282 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304283 } else {
4284 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
4285 mmc_hostname(host->mmc), enable);
4286
4287 }
4288 }
4289out:
4290 if (ret)
4291 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4292 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4293 ret, msm_host->pdata->sdiowakeup_irq);
4294 spin_unlock_irqrestore(&host->lock, flags);
4295 return ret;
4296}
4297
4298
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004299static int sdhci_msm_runtime_suspend(struct device *dev)
4300{
4301 struct sdhci_host *host = dev_get_drvdata(dev);
4302 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4303 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004304 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004305
Ritesh Harjani42876f42015-11-17 17:46:51 +05304306 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4307 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304308
Ritesh Harjani42876f42015-11-17 17:46:51 +05304309 sdhci_cfg_irq(host, false, true);
4310
4311defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004312 disable_irq(msm_host->pwr_irq);
4313
4314 /*
4315 * Remove the vote immediately only if clocks are off in which
4316 * case we might have queued work to remove vote but it may not
4317 * be completed before runtime suspend or system suspend.
4318 */
4319 if (!atomic_read(&msm_host->clks_on)) {
4320 if (msm_host->msm_bus_vote.client_handle)
4321 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4322 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004323 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4324 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004325
4326 return 0;
4327}
4328
4329static int sdhci_msm_runtime_resume(struct device *dev)
4330{
4331 struct sdhci_host *host = dev_get_drvdata(dev);
4332 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4333 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004334 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004335
Ritesh Harjani42876f42015-11-17 17:46:51 +05304336 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4337 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304338
Ritesh Harjani42876f42015-11-17 17:46:51 +05304339 sdhci_cfg_irq(host, true, true);
4340
4341defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004342 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004343
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004344 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4345 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004346 return 0;
4347}
4348
4349static int sdhci_msm_suspend(struct device *dev)
4350{
4351 struct sdhci_host *host = dev_get_drvdata(dev);
4352 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4353 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004354 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304355 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004356 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004357
4358 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4359 (msm_host->mmc->slot.cd_irq >= 0))
4360 disable_irq(msm_host->mmc->slot.cd_irq);
4361
4362 if (pm_runtime_suspended(dev)) {
4363 pr_debug("%s: %s: already runtime suspended\n",
4364 mmc_hostname(host->mmc), __func__);
4365 goto out;
4366 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004367 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004368out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304369
4370 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4371 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4372 if (sdio_cfg)
4373 sdhci_cfg_irq(host, false, true);
4374 }
4375
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004376 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4377 ktime_to_us(ktime_sub(ktime_get(), start)));
4378 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004379}
4380
4381static int sdhci_msm_resume(struct device *dev)
4382{
4383 struct sdhci_host *host = dev_get_drvdata(dev);
4384 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4385 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4386 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304387 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004388 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004389
4390 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4391 (msm_host->mmc->slot.cd_irq >= 0))
4392 enable_irq(msm_host->mmc->slot.cd_irq);
4393
4394 if (pm_runtime_suspended(dev)) {
4395 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4396 mmc_hostname(host->mmc), __func__);
4397 goto out;
4398 }
4399
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004400 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004401out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304402 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4403 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4404 if (sdio_cfg)
4405 sdhci_cfg_irq(host, true, true);
4406 }
4407
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004408 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4409 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004410 return ret;
4411}
4412
Ritesh Harjani42876f42015-11-17 17:46:51 +05304413static int sdhci_msm_suspend_noirq(struct device *dev)
4414{
4415 struct sdhci_host *host = dev_get_drvdata(dev);
4416 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4417 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4418 int ret = 0;
4419
4420 /*
4421 * ksdioirqd may be running, hence retry
4422 * suspend in case the clocks are ON
4423 */
4424 if (atomic_read(&msm_host->clks_on)) {
4425 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4426 mmc_hostname(host->mmc), __func__);
4427 ret = -EAGAIN;
4428 }
4429
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304430 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4431 if (msm_host->sdio_pending_processing)
4432 ret = -EBUSY;
4433
Ritesh Harjani42876f42015-11-17 17:46:51 +05304434 return ret;
4435}
4436
/*
 * PM callbacks: system sleep uses sdhci_msm_suspend/resume, runtime PM
 * uses sdhci_msm_runtime_suspend/resume. The explicit .suspend_noirq
 * runs after device interrupts are disabled and can abort the suspend
 * (-EAGAIN if clocks came back on, -EBUSY if SDIO wakeup processing is
 * pending).
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* CONFIG_PM disabled: register no PM callbacks at all */
#define SDHCI_MSM_PMOPS NULL
#endif
/* Devicetree match table; also exported for module autoloading */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4454
/* Platform driver glue binding probe/remove and the PM ops above */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,	/* NULL when CONFIG_PM is not set */
	},
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");