blob: 3f30f0fa8ff8c7893a1704f1529f71e8af40ae82 [file] [log] [blame]
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080046#define CORE_POWER 0x0
47#define CORE_SW_RST (1 << 7)
48
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070049#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_MCI_DATA_CNT 0x30
51#define CORE_MCI_STATUS 0x34
52#define CORE_MCI_FIFO_CNT 0x44
53
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030060#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061
62#define CORE_GENERICS 0x70
63#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053064
65#define CORE_VERSION_MAJOR_MASK 0xF0000000
66#define CORE_VERSION_MAJOR_SHIFT 28
67
Asutosh Das0ef24812012-12-18 16:14:02 +053068#define CORE_HC_MODE 0x78
69#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070070#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053071
Sahitya Tummala67717bc2013-08-02 09:21:37 +053072#define CORE_MCI_VERSION 0x050
73#define CORE_TESTBUS_CONFIG 0x0CC
74#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080075#define CORE_TESTBUS_SEL2_BIT 4
76#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053077
Asutosh Das0ef24812012-12-18 16:14:02 +053078#define CORE_PWRCTL_STATUS 0xDC
79#define CORE_PWRCTL_MASK 0xE0
80#define CORE_PWRCTL_CLEAR 0xE4
81#define CORE_PWRCTL_CTL 0xE8
82
83#define CORE_PWRCTL_BUS_OFF 0x01
84#define CORE_PWRCTL_BUS_ON (1 << 1)
85#define CORE_PWRCTL_IO_LOW (1 << 2)
86#define CORE_PWRCTL_IO_HIGH (1 << 3)
87
88#define CORE_PWRCTL_BUS_SUCCESS 0x01
89#define CORE_PWRCTL_BUS_FAIL (1 << 1)
90#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
91#define CORE_PWRCTL_IO_FAIL (1 << 3)
92
93#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070094#define MAX_PHASES 16
95
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070096#define CORE_DLL_CONFIG 0x100
97#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070098#define CORE_DLL_EN (1 << 16)
99#define CORE_CDR_EN (1 << 17)
100#define CORE_CK_OUT_EN (1 << 18)
101#define CORE_CDR_EXT_EN (1 << 19)
102#define CORE_DLL_PDN (1 << 29)
103#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700104
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700105#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700106#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700107#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700108
109#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700110#define CORE_CLK_PWRSAVE (1 << 1)
111#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
112#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
113#define CORE_HC_MCLK_SEL_MASK (3 << 8)
114#define CORE_HC_AUTO_CMD21_EN (1 << 6)
115#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700116#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700117#define CORE_HC_SELECT_IN_EN (1 << 18)
118#define CORE_HC_SELECT_IN_HS400 (6 << 19)
119#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700120#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700121
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800122#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
124
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530125#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530126#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
127#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530128#define CORE_ONE_MID_EN (1 << 25)
129
Krishna Konda7feab352013-09-17 23:55:40 -0700130#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530131#define CORE_8_BIT_SUPPORT (1 << 18)
132#define CORE_3_3V_SUPPORT (1 << 24)
133#define CORE_3_0V_SUPPORT (1 << 25)
134#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300135#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700136
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800137#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530138
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700139#define CORE_CSR_CDC_CTLR_CFG0 0x130
140#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
141#define CORE_HW_AUTOCAL_ENA (1 << 17)
142
143#define CORE_CSR_CDC_CTLR_CFG1 0x134
144#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
145#define CORE_TIMER_ENA (1 << 16)
146
147#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
148#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
149#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
150#define CORE_CDC_OFFSET_CFG 0x14C
151#define CORE_CSR_CDC_DELAY_CFG 0x150
152#define CORE_CDC_SLAVE_DDA_CFG 0x160
153#define CORE_CSR_CDC_STATUS0 0x164
154#define CORE_CALIBRATION_DONE (1 << 0)
155
156#define CORE_CDC_ERROR_CODE_MASK 0x7000000
157
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300158#define CQ_CMD_DBG_RAM 0x110
159#define CQ_CMD_DBG_RAM_WA 0x150
160#define CQ_CMD_DBG_RAM_OL 0x154
161
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700162#define CORE_CSR_CDC_GEN_CFG 0x178
163#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
164#define CORE_CDC_SWITCH_RC_EN (1 << 1)
165
166#define CORE_DDR_200_CFG 0x184
167#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530168#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700169#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530170
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700171#define CORE_VENDOR_SPEC3 0x1B0
172#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530173#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700174
175#define CORE_DLL_CONFIG_2 0x1B4
176#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800177#define CORE_FLL_CYCLE_CNT (1 << 18)
178#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700179
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530180#define CORE_DDR_CONFIG 0x1B8
181#define DDR_CONFIG_POR_VAL 0x80040853
182#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
183#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700184#define CORE_DDR_CONFIG_2 0x1BC
185#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700186
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700187/* 512 descriptors */
188#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530189#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530190
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700191#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800192#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700193
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700194#define INVALID_TUNING_PHASE -1
195
Krishna Konda96e6b112013-10-28 15:25:03 -0700196#define NUM_TUNING_PHASES 16
197#define MAX_DRV_TYPES_SUPPORTED_HS200 3
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200198#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700199
/* Tuning pattern sent by the card during CMD19/CMD21 tuning, 4-bit bus (64 B) */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};
206
/* Tuning pattern for 8-bit bus width (128 B) */
static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530217
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -0700218/* global to hold each slot instance for debug */
219static struct sdhci_msm_host *sdhci_slot[2];
220
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700221static int disable_slots;
222/* root can write, others read */
223module_param(disable_slots, int, S_IRUGO|S_IWUSR);
224
/* Requested VDD-IO voltage level, consumed by sdhci_msm_set_vdd_io_vol() */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
236
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700237/* MSM platform specific tuning */
238static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
239 u8 poll)
240{
241 int rc = 0;
242 u32 wait_cnt = 50;
243 u8 ck_out_en = 0;
244 struct mmc_host *mmc = host->mmc;
245
246 /* poll for CK_OUT_EN bit. max. poll time = 50us */
247 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
248 CORE_CK_OUT_EN);
249
250 while (ck_out_en != poll) {
251 if (--wait_cnt == 0) {
252 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
253 mmc_hostname(mmc), __func__, poll);
254 rc = -ETIMEDOUT;
255 goto out;
256 }
257 udelay(1);
258
259 ck_out_en = !!(readl_relaxed(host->ioaddr +
260 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
261 }
262out:
263 return rc;
264}
265
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530266/*
267 * Enable CDR to track changes of DAT lines and adjust sampling
268 * point according to voltage/temperature variations
269 */
270static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
271{
272 int rc = 0;
273 u32 config;
274
275 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
276 config |= CORE_CDR_EN;
277 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
278 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
279
280 rc = msm_dll_poll_ck_out_en(host, 0);
281 if (rc)
282 goto err;
283
284 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
285 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
286
287 rc = msm_dll_poll_ck_out_en(host, 1);
288 if (rc)
289 goto err;
290 goto out;
291err:
292 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
293out:
294 return rc;
295}
296
297static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
298 *attr, const char *buf, size_t count)
299{
300 struct sdhci_host *host = dev_get_drvdata(dev);
301 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
302 struct sdhci_msm_host *msm_host = pltfm_host->priv;
303 u32 tmp;
304 unsigned long flags;
305
306 if (!kstrtou32(buf, 0, &tmp)) {
307 spin_lock_irqsave(&host->lock, flags);
308 msm_host->en_auto_cmd21 = !!tmp;
309 spin_unlock_irqrestore(&host->lock, flags);
310 }
311 return count;
312}
313
314static ssize_t show_auto_cmd21(struct device *dev,
315 struct device_attribute *attr, char *buf)
316{
317 struct sdhci_host *host = dev_get_drvdata(dev);
318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
319 struct sdhci_msm_host *msm_host = pltfm_host->priv;
320
321 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
322}
323
324/* MSM auto-tuning handler */
325static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
326 bool enable,
327 u32 type)
328{
329 int rc = 0;
330 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
331 struct sdhci_msm_host *msm_host = pltfm_host->priv;
332 u32 val = 0;
333
334 if (!msm_host->en_auto_cmd21)
335 return 0;
336
337 if (type == MMC_SEND_TUNING_BLOCK_HS200)
338 val = CORE_HC_AUTO_CMD21_EN;
339 else
340 return 0;
341
342 if (enable) {
343 rc = msm_enable_cdr_cm_sdc4_dll(host);
344 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
345 val, host->ioaddr + CORE_VENDOR_SPEC);
346 } else {
347 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
348 ~val, host->ioaddr + CORE_VENDOR_SPEC);
349 }
350 return rc;
351}
352
/*
 * Program one of the 16 DLL clock output phases (grey-coded in hardware)
 * as the CDR sampling point. The whole sequence runs under host->lock so
 * it cannot race the interrupt path. Returns 0 on success or -ETIMEDOUT
 * if the CK_OUT_EN handshake never settles.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	/* maps linear phase 0..15 to the hardware's grey-coded encoding */
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	/* Disable CDR and gate the DLL output clock before reprogramming */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Re-enable CDR tracking now that the new phase is latched */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
408
/*
 * Find out the greatest range of consecuitive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* one row per run of consecutive phases found in phase_table */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/*
	 * Split phase_table (assumed sorted ascending) into rows, each row
	 * holding one run of consecutive phase values.
	 */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window of consecutive phases */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase at the 3/4 position within the selected window */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
522
523static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
524{
525 u32 mclk_freq = 0;
526
527 /* Program the MCLK value to MCLK_FREQ bit field */
528 if (host->clock <= 112000000)
529 mclk_freq = 0;
530 else if (host->clock <= 125000000)
531 mclk_freq = 1;
532 else if (host->clock <= 137000000)
533 mclk_freq = 2;
534 else if (host->clock <= 150000000)
535 mclk_freq = 3;
536 else if (host->clock <= 162000000)
537 mclk_freq = 4;
538 else if (host->clock <= 175000000)
539 mclk_freq = 5;
540 else if (host->clock <= 187000000)
541 mclk_freq = 6;
542 else if (host->clock <= 200000000)
543 mclk_freq = 7;
544
545 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
546 & ~(7 << 24)) | (mclk_freq << 24)),
547 host->ioaddr + CORE_DLL_CONFIG);
548}
549
/* Initialize the DLL (Programmable Delay Line ) */
/*
 * Full DLL reset/init sequence: PWRSAVE is temporarily forced off so MCLK
 * stays running, the DLL is reset and powered down, the frequency field is
 * programmed, then the DLL is brought back up and we poll for LOCK.
 * Returns 0 on success, -ETIMEDOUT if DLL_LOCK never asserts.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				& ~CORE_CK_OUT_EN),
				host->ioaddr + CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				| CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count field in DLL_CONFIG_2 selects the
		 * multiplier (8 vs 4) of the MCLK/TCXO ratio.
		 */
		if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
					& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
671
/*
 * Calibrate the CDCLP533 delay chain used for HS400 sampling. Runs the
 * documented CDC register init sequence, triggers HW calibration, then
 * polls CALIBRATION_DONE and checks the error code. Returns 0 on success,
 * -ETIMEDOUT if calibration never completes, -EINVAL on a CDC error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	/* NOTE: delay value diverges from the table above per later tuning */
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* ensure all programming lands before polling for completion */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
777
/*
 * Run CM_DLL_SDC4 (DDR) calibration: reprogram DDR_CONFIG, optionally
 * enable enhanced-strobe RCLK gating, kick DDR_CAL_EN and poll for
 * DDR_DLL_LOCK. Returns 0 on success, -ETIMEDOUT if the DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		/* newer targets: POR value of DDR_CONFIG_2 is already correct */
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
			host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* older targets: patch the PRG_RCLK_DLY field into POR value */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* ensure the PWRSAVE_DLL write is posted before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
839
Ritesh Harjaniea709662015-05-27 15:40:24 +0530840static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
841{
842 int ret = 0;
843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
844 struct sdhci_msm_host *msm_host = pltfm_host->priv;
845 struct mmc_host *mmc = host->mmc;
846
847 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
848
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530849 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
850 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530851 mmc_hostname(mmc));
852 return -EINVAL;
853 }
854
855 if (msm_host->calibration_done ||
856 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
857 return 0;
858 }
859
860 /*
861 * Reset the tuning block.
862 */
863 ret = msm_init_cm_dll(host);
864 if (ret)
865 goto out;
866
867 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
868out:
869 if (!ret)
870 msm_host->calibration_done = true;
871 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
872 __func__, ret);
873 return ret;
874}
875
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700876static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
877{
878 int ret = 0;
879 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
880 struct sdhci_msm_host *msm_host = pltfm_host->priv;
881
882 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
883
884 /*
885 * Retuning in HS400 (DDR mode) will fail, just reset the
886 * tuning block and restore the saved tuning phase.
887 */
888 ret = msm_init_cm_dll(host);
889 if (ret)
890 goto out;
891
892 /* Set the selected phase in delay line hw block */
893 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
894 if (ret)
895 goto out;
896
Krishna Konda0e8efba2014-06-23 14:50:38 -0700897 /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
898 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
899 | CORE_CMD_DAT_TRACK_SEL),
900 host->ioaddr + CORE_DLL_CONFIG);
901
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700902 if (msm_host->use_cdclp533)
903 /* Calibrate CDCLP533 DLL HW */
904 ret = sdhci_msm_cdclp533_calibration(host);
905 else
906 /* Calibrate CM_DLL_SDC4 HW */
907 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
908out:
909 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
910 __func__, ret);
911 return ret;
912}
913
Krishna Konda96e6b112013-10-28 15:25:03 -0700914static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
915 u8 drv_type)
916{
917 struct mmc_command cmd = {0};
918 struct mmc_request mrq = {NULL};
919 struct mmc_host *mmc = host->mmc;
920 u8 val = ((drv_type << 4) | 2);
921
922 cmd.opcode = MMC_SWITCH;
923 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
924 (EXT_CSD_HS_TIMING << 16) |
925 (val << 8) |
926 EXT_CSD_CMD_SET_NORMAL;
927 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
928 /* 1 sec */
929 cmd.busy_timeout = 1000 * 1000;
930
931 memset(cmd.resp, 0, sizeof(cmd.resp));
932 cmd.retries = 3;
933
934 mrq.cmd = &cmd;
935 cmd.data = NULL;
936
937 mmc_wait_for_req(mmc, &mrq);
938 pr_debug("%s: %s: set card drive type to %d\n",
939 mmc_hostname(mmc), __func__,
940 drv_type);
941}
942
/*
 * sdhci_msm_execute_tuning - sweep all 16 DLL phases to find a working one.
 *
 * For each phase the tuning command @opcode is issued and the returned
 * pattern compared against the expected tuning block; passing phases are
 * collected and the most appropriate one is programmed into the delay line.
 * If every phase passes on an eMMC card, the card's drive strength is
 * changed and tuning repeated until at least one phase fails. In HS400
 * mode (after tuning has already been done once) this instead runs the
 * DLL HW calibration path.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when no
 * tuning point is found after all retries, or an error from the DLL
 * config/calibration helpers.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* HS200 on an 8-bit bus uses the larger 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed tuning request, poll CMD13 until the card is
		 * back in TRAN state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* remembered so HS400 DLL calibration can restore it later */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1147
Asutosh Das0ef24812012-12-18 16:14:02 +05301148static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1149{
1150 struct sdhci_msm_gpio_data *curr;
1151 int i, ret = 0;
1152
1153 curr = pdata->pin_data->gpio_data;
1154 for (i = 0; i < curr->size; i++) {
1155 if (!gpio_is_valid(curr->gpio[i].no)) {
1156 ret = -EINVAL;
1157 pr_err("%s: Invalid gpio = %d\n", __func__,
1158 curr->gpio[i].no);
1159 goto free_gpios;
1160 }
1161 if (enable) {
1162 ret = gpio_request(curr->gpio[i].no,
1163 curr->gpio[i].name);
1164 if (ret) {
1165 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1166 __func__, curr->gpio[i].no,
1167 curr->gpio[i].name, ret);
1168 goto free_gpios;
1169 }
1170 curr->gpio[i].is_enabled = true;
1171 } else {
1172 gpio_free(curr->gpio[i].no);
1173 curr->gpio[i].is_enabled = false;
1174 }
1175 }
1176 return ret;
1177
1178free_gpios:
1179 for (i--; i >= 0; i--) {
1180 gpio_free(curr->gpio[i].no);
1181 curr->gpio[i].is_enabled = false;
1182 }
1183 return ret;
1184}
1185
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301186static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1187 bool enable)
1188{
1189 int ret = 0;
1190
1191 if (enable)
1192 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1193 pdata->pctrl_data->pins_active);
1194 else
1195 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1196 pdata->pctrl_data->pins_sleep);
1197
1198 if (ret < 0)
1199 pr_err("%s state for pinctrl failed with %d\n",
1200 enable ? "Enabling" : "Disabling", ret);
1201
1202 return ret;
1203}
1204
Asutosh Das0ef24812012-12-18 16:14:02 +05301205static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1206{
1207 int ret = 0;
1208
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301209 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301210 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301211 } else if (pdata->pctrl_data) {
1212 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1213 goto out;
1214 } else if (!pdata->pin_data) {
1215 return 0;
1216 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301217
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301218 if (pdata->pin_data->is_gpio)
1219 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301220out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301221 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301222 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301223
1224 return ret;
1225}
1226
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301227static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1228 u32 **out, int *len, u32 size)
1229{
1230 int ret = 0;
1231 struct device_node *np = dev->of_node;
1232 size_t sz;
1233 u32 *arr = NULL;
1234
1235 if (!of_get_property(np, prop_name, len)) {
1236 ret = -EINVAL;
1237 goto out;
1238 }
1239 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001240 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301241 dev_err(dev, "%s invalid size\n", prop_name);
1242 ret = -EINVAL;
1243 goto out;
1244 }
1245
1246 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1247 if (!arr) {
1248 dev_err(dev, "%s failed allocating memory\n", prop_name);
1249 ret = -ENOMEM;
1250 goto out;
1251 }
1252
1253 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1254 if (ret < 0) {
1255 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1256 goto out;
1257 }
1258 *out = arr;
1259out:
1260 if (ret)
1261 *len = 0;
1262 return ret;
1263}
1264
Asutosh Das0ef24812012-12-18 16:14:02 +05301265#define MAX_PROP_SIZE 32
1266static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1267 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1268{
1269 int len, ret = 0;
1270 const __be32 *prop;
1271 char prop_name[MAX_PROP_SIZE];
1272 struct sdhci_msm_reg_data *vreg;
1273 struct device_node *np = dev->of_node;
1274
1275 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1276 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301277 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301278 return ret;
1279 }
1280
1281 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1282 if (!vreg) {
1283 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1284 ret = -ENOMEM;
1285 return ret;
1286 }
1287
1288 vreg->name = vreg_name;
1289
1290 snprintf(prop_name, MAX_PROP_SIZE,
1291 "qcom,%s-always-on", vreg_name);
1292 if (of_get_property(np, prop_name, NULL))
1293 vreg->is_always_on = true;
1294
1295 snprintf(prop_name, MAX_PROP_SIZE,
1296 "qcom,%s-lpm-sup", vreg_name);
1297 if (of_get_property(np, prop_name, NULL))
1298 vreg->lpm_sup = true;
1299
1300 snprintf(prop_name, MAX_PROP_SIZE,
1301 "qcom,%s-voltage-level", vreg_name);
1302 prop = of_get_property(np, prop_name, &len);
1303 if (!prop || (len != (2 * sizeof(__be32)))) {
1304 dev_warn(dev, "%s %s property\n",
1305 prop ? "invalid format" : "no", prop_name);
1306 } else {
1307 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1308 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1309 }
1310
1311 snprintf(prop_name, MAX_PROP_SIZE,
1312 "qcom,%s-current-level", vreg_name);
1313 prop = of_get_property(np, prop_name, &len);
1314 if (!prop || (len != (2 * sizeof(__be32)))) {
1315 dev_warn(dev, "%s %s property\n",
1316 prop ? "invalid format" : "no", prop_name);
1317 } else {
1318 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1319 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1320 }
1321
1322 *vreg_data = vreg;
1323 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1324 vreg->name, vreg->is_always_on ? "always_on," : "",
1325 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1326 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1327
1328 return ret;
1329}
1330
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301331static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1332 struct sdhci_msm_pltfm_data *pdata)
1333{
1334 struct sdhci_pinctrl_data *pctrl_data;
1335 struct pinctrl *pctrl;
1336 int ret = 0;
1337
1338 /* Try to obtain pinctrl handle */
1339 pctrl = devm_pinctrl_get(dev);
1340 if (IS_ERR(pctrl)) {
1341 ret = PTR_ERR(pctrl);
1342 goto out;
1343 }
1344 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1345 if (!pctrl_data) {
1346 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1347 ret = -ENOMEM;
1348 goto out;
1349 }
1350 pctrl_data->pctrl = pctrl;
1351 /* Look-up and keep the states handy to be used later */
1352 pctrl_data->pins_active = pinctrl_lookup_state(
1353 pctrl_data->pctrl, "active");
1354 if (IS_ERR(pctrl_data->pins_active)) {
1355 ret = PTR_ERR(pctrl_data->pins_active);
1356 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1357 goto out;
1358 }
1359 pctrl_data->pins_sleep = pinctrl_lookup_state(
1360 pctrl_data->pctrl, "sleep");
1361 if (IS_ERR(pctrl_data->pins_sleep)) {
1362 ret = PTR_ERR(pctrl_data->pins_sleep);
1363 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1364 goto out;
1365 }
1366 pdata->pctrl_data = pctrl_data;
1367out:
1368 return ret;
1369}
1370
Asutosh Das0ef24812012-12-18 16:14:02 +05301371#define GPIO_NAME_MAX_LEN 32
1372static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1373 struct sdhci_msm_pltfm_data *pdata)
1374{
1375 int ret = 0, cnt, i;
1376 struct sdhci_msm_pin_data *pin_data;
1377 struct device_node *np = dev->of_node;
1378
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301379 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1380 if (!ret) {
1381 goto out;
1382 } else if (ret == -EPROBE_DEFER) {
1383 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1384 goto out;
1385 } else {
1386 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1387 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301388 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301389 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301390 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1391 if (!pin_data) {
1392 dev_err(dev, "No memory for pin_data\n");
1393 ret = -ENOMEM;
1394 goto out;
1395 }
1396
1397 cnt = of_gpio_count(np);
1398 if (cnt > 0) {
1399 pin_data->gpio_data = devm_kzalloc(dev,
1400 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1401 if (!pin_data->gpio_data) {
1402 dev_err(dev, "No memory for gpio_data\n");
1403 ret = -ENOMEM;
1404 goto out;
1405 }
1406 pin_data->gpio_data->size = cnt;
1407 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1408 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1409
1410 if (!pin_data->gpio_data->gpio) {
1411 dev_err(dev, "No memory for gpio\n");
1412 ret = -ENOMEM;
1413 goto out;
1414 }
1415
1416 for (i = 0; i < cnt; i++) {
1417 const char *name = NULL;
1418 char result[GPIO_NAME_MAX_LEN];
1419 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1420 of_property_read_string_index(np,
1421 "qcom,gpio-names", i, &name);
1422
1423 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1424 dev_name(dev), name ? name : "?");
1425 pin_data->gpio_data->gpio[i].name = result;
1426 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1427 pin_data->gpio_data->gpio[i].name,
1428 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301429 }
1430 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301431 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301432out:
1433 if (ret)
1434 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1435 return ret;
1436}
1437
#ifdef CONFIG_SMP
/*
 * On SMP kernels the PM QoS IRQ vote tracks the CPU(s) the controller's
 * IRQ is affined to (PM_QOS_REQ_AFFINE_IRQ).
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* On UP kernels there is no IRQ affinity to track; keep the default type. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1446
Gilad Bronerc788a672015-09-08 15:39:11 +03001447static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1448 struct sdhci_msm_pltfm_data *pdata)
1449{
1450 struct device_node *np = dev->of_node;
1451 const char *str;
1452 u32 cpu;
1453 int ret = 0;
1454 int i;
1455
1456 pdata->pm_qos_data.irq_valid = false;
1457 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1458 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1459 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001460 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001461 }
1462
1463 /* must specify cpu for "affine_cores" type */
1464 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1465 pdata->pm_qos_data.irq_cpu = -1;
1466 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1467 if (ret) {
1468 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1469 ret);
1470 goto out;
1471 }
1472 if (cpu < 0 || cpu >= num_possible_cpus()) {
1473 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1474 __func__, cpu, num_possible_cpus());
1475 ret = -EINVAL;
1476 goto out;
1477 }
1478 pdata->pm_qos_data.irq_cpu = cpu;
1479 }
1480
1481 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1482 SDHCI_POWER_POLICY_NUM) {
1483 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1484 __func__, SDHCI_POWER_POLICY_NUM);
1485 ret = -EINVAL;
1486 goto out;
1487 }
1488
1489 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1490 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1491 &pdata->pm_qos_data.irq_latency.latency[i]);
1492
1493 pdata->pm_qos_data.irq_valid = true;
1494out:
1495 return ret;
1496}
1497
1498static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1499 struct sdhci_msm_pltfm_data *pdata)
1500{
1501 struct device_node *np = dev->of_node;
1502 u32 mask;
1503 int nr_groups;
1504 int ret;
1505 int i;
1506
1507 /* Read cpu group mapping */
1508 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1509 if (nr_groups <= 0) {
1510 ret = -EINVAL;
1511 goto out;
1512 }
1513 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1514 pdata->pm_qos_data.cpu_group_map.mask =
1515 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1516 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1517 ret = -ENOMEM;
1518 goto out;
1519 }
1520
1521 for (i = 0; i < nr_groups; i++) {
1522 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1523 i, &mask);
1524
1525 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1526 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1527 cpu_possible_mask)) {
1528 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1529 __func__, mask, i);
1530 ret = -EINVAL;
1531 goto free_res;
1532 }
1533 }
1534 return 0;
1535
1536free_res:
1537 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1538out:
1539 return ret;
1540}
1541
1542static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1543 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1544{
1545 struct device_node *np = dev->of_node;
1546 struct sdhci_msm_pm_qos_latency *values;
1547 int ret;
1548 int i;
1549 int group;
1550 int cfg;
1551
1552 ret = of_property_count_u32_elems(np, name);
1553 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1554 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1555 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1556 ret);
1557 return -EINVAL;
1558 } else if (ret < 0) {
1559 return ret;
1560 }
1561
1562 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1563 GFP_KERNEL);
1564 if (!values)
1565 return -ENOMEM;
1566
1567 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1568 group = i / SDHCI_POWER_POLICY_NUM;
1569 cfg = i % SDHCI_POWER_POLICY_NUM;
1570 of_property_read_u32_index(np, name, i,
1571 &(values[group].latency[cfg]));
1572 }
1573
1574 *latency = values;
1575 return 0;
1576}
1577
1578static void sdhci_msm_pm_qos_parse(struct device *dev,
1579 struct sdhci_msm_pltfm_data *pdata)
1580{
1581 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1582 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1583 __func__);
1584
1585 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1586 pdata->pm_qos_data.cmdq_valid =
1587 !sdhci_msm_pm_qos_parse_latency(dev,
1588 "qcom,pm-qos-cmdq-latency-us",
1589 pdata->pm_qos_data.cpu_group_map.nr_groups,
1590 &pdata->pm_qos_data.cmdq_latency);
1591 pdata->pm_qos_data.legacy_valid =
1592 !sdhci_msm_pm_qos_parse_latency(dev,
1593 "qcom,pm-qos-legacy-latency-us",
1594 pdata->pm_qos_data.cpu_group_map.nr_groups,
1595 &pdata->pm_qos_data.latency);
1596 if (!pdata->pm_qos_data.cmdq_valid &&
1597 !pdata->pm_qos_data.legacy_valid) {
1598 /* clean-up previously allocated arrays */
1599 kfree(pdata->pm_qos_data.latency);
1600 kfree(pdata->pm_qos_data.cmdq_latency);
1601 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1602 __func__);
1603 }
1604 } else {
1605 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1606 __func__);
1607 }
1608}
1609
Asutosh Das0ef24812012-12-18 16:14:02 +05301610/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001611static
1612struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1613 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301614{
1615 struct sdhci_msm_pltfm_data *pdata = NULL;
1616 struct device_node *np = dev->of_node;
1617 u32 bus_width = 0;
1618 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301619 int clk_table_len;
1620 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301621 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301622
1623 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1624 if (!pdata) {
1625 dev_err(dev, "failed to allocate memory for platform data\n");
1626 goto out;
1627 }
1628
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301629 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1630 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1631 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301632
Asutosh Das0ef24812012-12-18 16:14:02 +05301633 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1634 if (bus_width == 8)
1635 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1636 else if (bus_width == 4)
1637 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1638 else {
1639 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1640 pdata->mmc_bus_width = 0;
1641 }
1642
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001643 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1644 &msm_host->mmc->clk_scaling.freq_table,
1645 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1646 pr_debug("%s: no clock scaling frequencies were supplied\n",
1647 dev_name(dev));
1648 else if (!msm_host->mmc->clk_scaling.freq_table ||
1649 !msm_host->mmc->clk_scaling.freq_table_sz)
1650 dev_err(dev, "bad dts clock scaling frequencies\n");
1651
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301652 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1653 &clk_table, &clk_table_len, 0)) {
1654 dev_err(dev, "failed parsing supported clock rates\n");
1655 goto out;
1656 }
1657 if (!clk_table || !clk_table_len) {
1658 dev_err(dev, "Invalid clock table\n");
1659 goto out;
1660 }
1661 pdata->sup_clk_table = clk_table;
1662 pdata->sup_clk_cnt = clk_table_len;
1663
Asutosh Das0ef24812012-12-18 16:14:02 +05301664 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1665 sdhci_msm_slot_reg_data),
1666 GFP_KERNEL);
1667 if (!pdata->vreg_data) {
1668 dev_err(dev, "failed to allocate memory for vreg data\n");
1669 goto out;
1670 }
1671
1672 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1673 "vdd")) {
1674 dev_err(dev, "failed parsing vdd data\n");
1675 goto out;
1676 }
1677 if (sdhci_msm_dt_parse_vreg_info(dev,
1678 &pdata->vreg_data->vdd_io_data,
1679 "vdd-io")) {
1680 dev_err(dev, "failed parsing vdd-io data\n");
1681 goto out;
1682 }
1683
1684 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1685 dev_err(dev, "failed parsing gpio data\n");
1686 goto out;
1687 }
1688
Asutosh Das0ef24812012-12-18 16:14:02 +05301689 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1690
1691 for (i = 0; i < len; i++) {
1692 const char *name = NULL;
1693
1694 of_property_read_string_index(np,
1695 "qcom,bus-speed-mode", i, &name);
1696 if (!name)
1697 continue;
1698
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001699 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1700 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1701 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1702 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1703 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301704 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1705 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1706 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1707 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1708 pdata->caps |= MMC_CAP_1_8V_DDR
1709 | MMC_CAP_UHS_DDR50;
1710 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1711 pdata->caps |= MMC_CAP_1_2V_DDR
1712 | MMC_CAP_UHS_DDR50;
1713 }
1714
1715 if (of_get_property(np, "qcom,nonremovable", NULL))
1716 pdata->nonremovable = true;
1717
Guoping Yuf7c91332014-08-20 16:56:18 +08001718 if (of_get_property(np, "qcom,nonhotplug", NULL))
1719 pdata->nonhotplug = true;
1720
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001721 pdata->largeaddressbus =
1722 of_property_read_bool(np, "qcom,large-address-bus");
1723
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001724 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1725 msm_host->mmc->wakeup_on_idle = true;
1726
Gilad Bronerc788a672015-09-08 15:39:11 +03001727 sdhci_msm_pm_qos_parse(dev, pdata);
1728
Pavan Anamula5a256df2015-10-16 14:38:28 +05301729 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1730 pdata->core_3_0v_support = true;
1731
Asutosh Das0ef24812012-12-18 16:14:02 +05301732 return pdata;
1733out:
1734 return NULL;
1735}
1736
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301737/* Returns required bandwidth in Bytes per Sec */
1738static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1739 struct mmc_ios *ios)
1740{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301741 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1742 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1743
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301744 unsigned int bw;
1745
Sahitya Tummala2886c922013-04-03 18:03:31 +05301746 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301747 /*
1748 * For DDR mode, SDCC controller clock will be at
1749 * the double rate than the actual clock that goes to card.
1750 */
1751 if (ios->bus_width == MMC_BUS_WIDTH_4)
1752 bw /= 2;
1753 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1754 bw /= 8;
1755
1756 return bw;
1757}
1758
1759static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1760 unsigned int bw)
1761{
1762 unsigned int *table = host->pdata->voting_data->bw_vecs;
1763 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1764 int i;
1765
1766 if (host->msm_bus_vote.is_max_bw_needed && bw)
1767 return host->msm_bus_vote.max_bw_vote;
1768
1769 for (i = 0; i < size; i++) {
1770 if (bw <= table[i])
1771 break;
1772 }
1773
1774 if (i && (i == size))
1775 i--;
1776
1777 return i;
1778}
1779
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * Applies @vote to the msm bus-scale client if it differs from the
 * currently cached vote.  @flags is the caller's saved IRQ flags for
 * host->lock; the lock is dropped around the bus-scale call
 * (presumably because that call can sleep — confirm against the
 * msm_bus API) and re-acquired before returning.
 * Returns 0 on success or the bus-scale error code.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/* Drop the lock for the (potentially slow) bus-scale request */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Cache the applied vote so identical requests become no-ops */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
1810
1811/*
1812 * Internal work. Work to set 0 bandwidth for msm bus.
1813 */
1814static void sdhci_msm_bus_work(struct work_struct *work)
1815{
1816 struct sdhci_msm_host *msm_host;
1817 struct sdhci_host *host;
1818 unsigned long flags;
1819
1820 msm_host = container_of(work, struct sdhci_msm_host,
1821 msm_bus_vote.vote_work.work);
1822 host = platform_get_drvdata(msm_host->pdev);
1823
1824 if (!msm_host->msm_bus_vote.client_handle)
1825 return;
1826
1827 spin_lock_irqsave(&host->lock, flags);
1828 /* don't vote for 0 bandwidth if any request is in progress */
1829 if (!host->mrq) {
1830 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301831 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301832 } else
1833 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1834 mmc_hostname(host->mmc), __func__);
1835 spin_unlock_irqrestore(&host->lock, flags);
1836}
1837
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/* Stop a pending deferred 0-bandwidth vote from racing with us */
	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	/* May drop and re-acquire host->lock through &flags */
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
1856
1857#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1858
1859/* This function queues a work which will set the bandwidth requiement to 0 */
1860static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
1861{
1862 unsigned long flags;
1863 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1864 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1865
1866 spin_lock_irqsave(&host->lock, flags);
1867 if (msm_host->msm_bus_vote.min_bw_vote !=
1868 msm_host->msm_bus_vote.curr_vote)
1869 queue_delayed_work(system_wq,
1870 &msm_host->msm_bus_vote.vote_work,
1871 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
1872 spin_unlock_irqrestore(&host->lock, flags);
1873}
1874
1875static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
1876 struct platform_device *pdev)
1877{
1878 int rc = 0;
1879 struct msm_bus_scale_pdata *bus_pdata;
1880
1881 struct sdhci_msm_bus_voting_data *data;
1882 struct device *dev = &pdev->dev;
1883
1884 data = devm_kzalloc(dev,
1885 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
1886 if (!data) {
1887 dev_err(&pdev->dev,
1888 "%s: failed to allocate memory\n", __func__);
1889 rc = -ENOMEM;
1890 goto out;
1891 }
1892 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
1893 if (data->bus_pdata) {
1894 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
1895 &data->bw_vecs, &data->bw_vecs_size, 0);
1896 if (rc) {
1897 dev_err(&pdev->dev,
1898 "%s: Failed to get bus-bw-vectors-bps\n",
1899 __func__);
1900 goto out;
1901 }
1902 host->pdata->voting_data = data;
1903 }
1904 if (host->pdata->voting_data &&
1905 host->pdata->voting_data->bus_pdata &&
1906 host->pdata->voting_data->bw_vecs &&
1907 host->pdata->voting_data->bw_vecs_size) {
1908
1909 bus_pdata = host->pdata->voting_data->bus_pdata;
1910 host->msm_bus_vote.client_handle =
1911 msm_bus_scale_register_client(bus_pdata);
1912 if (!host->msm_bus_vote.client_handle) {
1913 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
1914 rc = -EFAULT;
1915 goto out;
1916 }
1917 /* cache the vote index for minimum and maximum bandwidth */
1918 host->msm_bus_vote.min_bw_vote =
1919 sdhci_msm_bus_get_vote_for_bw(host, 0);
1920 host->msm_bus_vote.max_bw_vote =
1921 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
1922 } else {
1923 devm_kfree(dev, data);
1924 }
1925
1926out:
1927 return rc;
1928}
1929
1930static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1931{
1932 if (host->msm_bus_vote.client_handle)
1933 msm_bus_scale_unregister_client(
1934 host->msm_bus_vote.client_handle);
1935}
1936
/*
 * Enable or disable the bus bandwidth vote for this host.
 * enable != 0: vote immediately for the bandwidth the current ios needs.
 * enable == 0: remove the vote, either immediately (when MMC clock
 * gating will delay the actual clock-off) or via delayed work.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* No registered bus-scale client: nothing to vote on */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			/* Otherwise defer the 0-bandwidth vote via delayed work */
			sdhci_msm_bus_queue_work(host);
	}
}
1965
Asutosh Das0ef24812012-12-18 16:14:02 +05301966/* Regulator utility functions */
1967static int sdhci_msm_vreg_init_reg(struct device *dev,
1968 struct sdhci_msm_reg_data *vreg)
1969{
1970 int ret = 0;
1971
1972 /* check if regulator is already initialized? */
1973 if (vreg->reg)
1974 goto out;
1975
1976 /* Get the regulator handle */
1977 vreg->reg = devm_regulator_get(dev, vreg->name);
1978 if (IS_ERR(vreg->reg)) {
1979 ret = PTR_ERR(vreg->reg);
1980 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1981 __func__, vreg->name, ret);
1982 goto out;
1983 }
1984
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301985 if (regulator_count_voltages(vreg->reg) > 0) {
1986 vreg->set_voltage_sup = true;
1987 /* sanity check */
1988 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1989 pr_err("%s: %s invalid constraints specified\n",
1990 __func__, vreg->name);
1991 ret = -EINVAL;
1992 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301993 }
1994
1995out:
1996 return ret;
1997}
1998
1999static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2000{
2001 if (vreg->reg)
2002 devm_regulator_put(vreg->reg);
2003}
2004
2005static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2006 *vreg, int uA_load)
2007{
2008 int ret = 0;
2009
2010 /*
2011 * regulators that do not support regulator_set_voltage also
2012 * do not support regulator_set_optimum_mode
2013 */
2014 if (vreg->set_voltage_sup) {
2015 ret = regulator_set_load(vreg->reg, uA_load);
2016 if (ret < 0)
2017 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2018 __func__, vreg->name, uA_load, ret);
2019 else
2020 /*
2021 * regulator_set_load() can return non zero
2022 * value even for success case.
2023 */
2024 ret = 0;
2025 }
2026 return ret;
2027}
2028
2029static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2030 int min_uV, int max_uV)
2031{
2032 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302033 if (vreg->set_voltage_sup) {
2034 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2035 if (ret) {
2036 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302037 __func__, vreg->name, min_uV, max_uV, ret);
2038 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302039 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302040
2041 return ret;
2042}
2043
/*
 * Enable a slot regulator: switch it to high-power mode, program the
 * high voltage level on first enable, then enable the supply.
 * Returns 0 on success or a negative error code.
 * NOTE(review): regulator_enable() is called even when is_enabled is
 * already true — presumably to keep the framework refcount balanced;
 * confirm against the callers.
 */
static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Put regulator in HPM (high power mode) */
	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
	if (ret < 0)
		return ret;

	if (!vreg->is_enabled) {
		/* Set voltage level */
		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
						vreg->high_vol_level);
		if (ret)
			return ret;
	}
	ret = regulator_enable(vreg->reg);
	if (ret) {
		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
		return ret;
	}
	vreg->is_enabled = true;
	return ret;
}
2069
/*
 * Disable a slot regulator, honouring the always_on flag:
 *  - normal regulators are disabled, their load dropped to 0 and the
 *    voltage floor relaxed to 0;
 *  - always_on regulators stay enabled but are switched to low-power
 *    mode when the platform data allows it (lpm_sup).
 * Returns 0 on success or a negative error code.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		/* Drop the load request now that the supply is off */
		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							      vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2104
2105static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2106 bool enable, bool is_init)
2107{
2108 int ret = 0, i;
2109 struct sdhci_msm_slot_reg_data *curr_slot;
2110 struct sdhci_msm_reg_data *vreg_table[2];
2111
2112 curr_slot = pdata->vreg_data;
2113 if (!curr_slot) {
2114 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2115 __func__);
2116 goto out;
2117 }
2118
2119 vreg_table[0] = curr_slot->vdd_data;
2120 vreg_table[1] = curr_slot->vdd_io_data;
2121
2122 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2123 if (vreg_table[i]) {
2124 if (enable)
2125 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2126 else
2127 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2128 if (ret)
2129 goto out;
2130 }
2131 }
2132out:
2133 return ret;
2134}
2135
2136/*
2137 * Reset vreg by ensuring it is off during probe. A call
2138 * to enable vreg is needed to balance disable vreg
2139 */
2140static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2141{
2142 int ret;
2143
2144 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2145 if (ret)
2146 return ret;
2147 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2148 return ret;
2149}
2150
/* This init function should be called only once for each SDHC slot */
/*
 * is_init == true: acquire both regulator handles and run the power-on/
 * power-off reset cycle.  is_init == false: release any handles that
 * were acquired (the deinit ladder below).  Returns 0 on success or the
 * first failing step's error code.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	/* No regulator data: slot is powered by an always-on domain */
	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev,
						curr_vdd_io_reg);
		if (ret)
			goto vdd_reg_deinit;
	}
	/* Balance the framework state by cycling the supplies once */
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2199
2200
2201static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2202 enum vdd_io_level level,
2203 unsigned int voltage_level)
2204{
2205 int ret = 0;
2206 int set_level;
2207 struct sdhci_msm_reg_data *vdd_io_reg;
2208
2209 if (!pdata->vreg_data)
2210 return ret;
2211
2212 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2213 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2214 switch (level) {
2215 case VDD_IO_LOW:
2216 set_level = vdd_io_reg->low_vol_level;
2217 break;
2218 case VDD_IO_HIGH:
2219 set_level = vdd_io_reg->high_vol_level;
2220 break;
2221 case VDD_IO_SET_LEVEL:
2222 set_level = voltage_level;
2223 break;
2224 default:
2225 pr_err("%s: invalid argument level = %d",
2226 __func__, level);
2227 ret = -EINVAL;
2228 return ret;
2229 }
2230 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2231 set_level);
2232 }
2233 return ret;
2234}
2235
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302236void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2237{
2238 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2239 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2240
2241 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2242 mmc_hostname(host->mmc),
2243 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
2244 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
2245 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
2246}
2247
/*
 * Power-control interrupt handler.  Reads the pending BUS ON/OFF and
 * IO HIGH/LOW requests from CORE_PWRCTL_STATUS, performs the requested
 * regulator/pin/voltage changes, acknowledges success or failure back
 * to the controller via CORE_PWRCTL_CTL, flips the IO pad power switch
 * when needed, then records the new state and wakes any waiter in
 * sdhci_msm_check_power_status().
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 irq_status = 0;
	u8 irq_ack = 0;
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status &
		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			/* Unclearable status would loop forever; give up hard */
			BUG_ON(1);
		}
		writeb_relaxed(irq_status,
				(msm_host->core_mem + CORE_PWRCTL_CLEAR));
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
				mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();

	/*
	 * Flip the IO pad power switch to match the new signalling level,
	 * taking the controller's advertised 3.0V/1.8V support into account.
	 */
	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	/* Publish the new state and wake sdhci_msm_check_power_status() */
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2383
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302384static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302385show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2386{
2387 struct sdhci_host *host = dev_get_drvdata(dev);
2388 int poll;
2389 unsigned long flags;
2390
2391 spin_lock_irqsave(&host->lock, flags);
2392 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2393 spin_unlock_irqrestore(&host->lock, flags);
2394
2395 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2396}
2397
2398static ssize_t
2399store_polling(struct device *dev, struct device_attribute *attr,
2400 const char *buf, size_t count)
2401{
2402 struct sdhci_host *host = dev_get_drvdata(dev);
2403 int value;
2404 unsigned long flags;
2405
2406 if (!kstrtou32(buf, 0, &value)) {
2407 spin_lock_irqsave(&host->lock, flags);
2408 if (value) {
2409 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2410 mmc_detect_change(host->mmc, 0);
2411 } else {
2412 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2413 }
2414 spin_unlock_irqrestore(&host->lock, flags);
2415 }
2416 return count;
2417}
2418
/* sysfs read: report whether the max-bandwidth bus vote override is set */
static ssize_t
show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			msm_host->msm_bus_vote.is_max_bw_needed);
}
2430
2431static ssize_t
2432store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2433 const char *buf, size_t count)
2434{
2435 struct sdhci_host *host = dev_get_drvdata(dev);
2436 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2437 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2438 uint32_t value;
2439 unsigned long flags;
2440
2441 if (!kstrtou32(buf, 0, &value)) {
2442 spin_lock_irqsave(&host->lock, flags);
2443 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2444 spin_unlock_irqrestore(&host->lock, flags);
2445 }
2446 return count;
2447}
2448
/*
 * Block until the power IRQ has serviced the request described by
 * req_type (REQ_BUS_ON/OFF, REQ_IO_HIGH/LOW), or return immediately
 * when the current state already satisfies it or when the hardware
 * cannot generate the IRQ for this request.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);
	io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
				((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
					mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to handle a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else
		wait_for_completion(&msm_host->pwr_irq_completion);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2505
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002506static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2507{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302508 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2509
2510 if (enable) {
2511 config |= CORE_CDR_EN;
2512 config &= ~CORE_CDR_EXT_EN;
2513 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2514 } else {
2515 config &= ~CORE_CDR_EN;
2516 config |= CORE_CDR_EXT_EN;
2517 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2518 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002519}
2520
/* Maximum number of scatter-gather segments this driver supports */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2525
/* Lowest supported clock rate: first entry of the DT clock table
 * (assumes the table is sorted ascending — see get_max_clock) */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return msm_host->pdata->sup_clk_table[0];
}
2533
/* Highest supported clock rate: last entry of the DT clock table
 * (assumes the table is sorted ascending — see get_min_clock) */
static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int max_clk_index = msm_host->pdata->sup_clk_cnt;

	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
}
2542
2543static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2544 u32 req_clk)
2545{
2546 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2547 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2548 unsigned int sel_clk = -1;
2549 unsigned char cnt;
2550
2551 if (req_clk < sdhci_msm_get_min_clock(host)) {
2552 sel_clk = sdhci_msm_get_min_clock(host);
2553 return sel_clk;
2554 }
2555
2556 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2557 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2558 break;
2559 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2560 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2561 break;
2562 } else {
2563 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2564 }
2565 }
2566 return sel_clk;
2567}
2568
/*
 * Enable the controller's interface (pclk) and core (clk) clocks and
 * take a bus bandwidth vote.  Idempotent: returns 0 immediately when
 * controller_clock is already set.  On failure the partially acquired
 * clock/vote resources are released before returning the error.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before the clocks start drawing it */
	sdhci_msm_bus_voting(host, 1);

	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
			       mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
		       mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2610
2611
2612
/*
 * Turn the full set of SDHC clocks on or off, tracking the state in
 * msm_host->clks_on.  Enable order: bus vote -> controller clocks
 * (pclk + clk) -> bus_clk -> ff_clk -> sleep_clk; disable runs the
 * reverse and also drops the bus vote.  On an enable failure the
 * goto labels unwind exactly the clocks already enabled.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* Optional clocks: only enabled when the platform provides them. */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are observed before proceeding. */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* Gate SDCLK at the host controller before removing sources. */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 * Note: clks_on is deliberately left set in this case.
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
/* Error unwind for the enable path (reverse order of enabling). */
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2707
/*
 * sdhci_msm_set_clock - set the card clock rate and the matching
 * controller configuration (power-save gating, DDR doubling, HS400
 * MCLK/HC_SELECT_IN selection), then program the standard SDHCI
 * clock via sdhci_set_clock().
 *
 * clock == 0 disables power-save gating and all SDHC clocks.
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	/* Enable power-save clock gating only past init and if the card allows it. */
	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* clear CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* Order all vendor register writes before changing the clock rate. */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2868
Sahitya Tummala14613432013-03-21 11:13:25 +05302869static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
2870 unsigned int uhs)
2871{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002872 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2873 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala14613432013-03-21 11:13:25 +05302874 u16 ctrl_2;
2875
2876 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2877 /* Select Bus Speed Mode for host */
2878 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08002879 if ((uhs == MMC_TIMING_MMC_HS400) ||
2880 (uhs == MMC_TIMING_MMC_HS200) ||
2881 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05302882 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2883 else if (uhs == MMC_TIMING_UHS_SDR12)
2884 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2885 else if (uhs == MMC_TIMING_UHS_SDR25)
2886 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2887 else if (uhs == MMC_TIMING_UHS_SDR50)
2888 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08002889 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
2890 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05302891 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302892 /*
2893 * When clock frquency is less than 100MHz, the feedback clock must be
2894 * provided and DLL must not be used so that tuning can be skipped. To
2895 * provide feedback clock, the mode selection can be any value less
2896 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
2897 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002898 if (host->clock <= CORE_FREQ_100MHZ) {
2899 if ((uhs == MMC_TIMING_MMC_HS400) ||
2900 (uhs == MMC_TIMING_MMC_HS200) ||
2901 (uhs == MMC_TIMING_UHS_SDR104))
2902 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302903
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002904 /*
2905 * Make sure DLL is disabled when not required
2906 *
2907 * Write 1 to DLL_RST bit of DLL_CONFIG register
2908 */
2909 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2910 | CORE_DLL_RST),
2911 host->ioaddr + CORE_DLL_CONFIG);
2912
2913 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
2914 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2915 | CORE_DLL_PDN),
2916 host->ioaddr + CORE_DLL_CONFIG);
2917 mb();
2918
2919 /*
2920 * The DLL needs to be restored and CDCLP533 recalibrated
2921 * when the clock frequency is set back to 400MHz.
2922 */
2923 msm_host->calibration_done = false;
2924 }
2925
2926 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
2927 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05302928 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2929
2930}
2931
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002932#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002933#define DRV_NAME "cmdq-host"
2934static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_msm_host *msm_host)
2935{
2936 int i = 0;
2937 struct cmdq_host *cq_host = mmc_cmdq_private(msm_host->mmc);
2938 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2939 u16 minor = version & CORE_VERSION_TARGET_MASK;
2940 /* registers offset changed starting from 4.2.0 */
2941 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2942
2943 pr_err("---- Debug RAM dump ----\n");
2944 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2945 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2946 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
2947
2948 while (i < 16) {
2949 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
2950 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
2951 i++;
2952 }
2953 pr_err("-------------------------\n");
2954}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05302955
/*
 * Dump SDHC-MSM vendor-specific registers and the internal test bus
 * to the kernel log for post-mortem debugging of controller hangs.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	/* Include the CMDQ debug RAM only when a command queue host exists. */
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(msm_host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			/* Select one test bus, then capture the debug register. */
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	/* Print the captured test-bus values four per line. */
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3010
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303011/*
3012 * sdhci_msm_enhanced_strobe_mask :-
3013 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3014 * SW should write 3 to
3015 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3016 * The default reset value of this register is 2.
3017 */
3018static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3019{
3020 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3021 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3022
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303023 if (!msm_host->enhanced_strobe ||
3024 !mmc_card_strobe(msm_host->mmc->card)) {
3025 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303026 mmc_hostname(host->mmc));
3027 return;
3028 }
3029
3030 if (set) {
3031 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3032 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3033 host->ioaddr + CORE_VENDOR_SPEC3);
3034 } else {
3035 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3036 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3037 host->ioaddr + CORE_VENDOR_SPEC3);
3038 }
3039}
3040
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003041static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3042{
3043 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3044 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3045
3046 if (set) {
3047 writel_relaxed(CORE_TESTBUS_ENA,
3048 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3049 } else {
3050 u32 value;
3051
3052 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3053 value &= ~CORE_TESTBUS_ENA;
3054 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3055 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303056}
3057
/*
 * Card-detect hook: when an SDIO card is present, advertise
 * MMC_PM_KEEP_POWER so the card can stay powered across suspend;
 * clear the capability otherwise.
 */
static void sdhci_msm_detect(struct sdhci_host *host, bool detected)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = msm_host->mmc;
	struct mmc_card *card = mmc->card;

	/* NOTE(review): assumes 'card' is non-NULL whenever detected is true
	 * (mmc_card_sdio() dereferences it) — confirm against callers. */
	if (detected && mmc_card_sdio(card))
		mmc->pm_caps |= MMC_PM_KEEP_POWER;
	else
		mmc->pm_caps &= ~MMC_PM_KEEP_POWER;
}
3070
/*
 * Request a controller soft reset via HC_SW_RST_REQ.  If the request
 * bit does not self-clear within ~100ms, arm the "wait idle disable"
 * workaround so the reset proceeds without waiting for pending AXI
 * transfers.  With enable == 0 the workaround bit is cleared again.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		/* Issue the software reset request. */
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		/* Poll up to 10000 * 10us for the request bit to clear. */
		timeout = 10000;
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
			HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
					HC_SW_RST_WAIT_IDLE_DIS,
					host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				/* Record when the workaround was armed. */
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3112
Gilad Broner44445992015-09-29 16:05:39 +03003113static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3114{
3115 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
3116 container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
3117
3118 if (atomic_read(&pm_qos_irq->counter))
3119 return;
3120
3121 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3122 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3123}
3124
3125void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3126{
3127 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3128 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3129 struct sdhci_msm_pm_qos_latency *latency =
3130 &msm_host->pdata->pm_qos_data.irq_latency;
3131 int counter;
3132
3133 if (!msm_host->pm_qos_irq.enabled)
3134 return;
3135
3136 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3137 /* Make sure to update the voting in case power policy has changed */
3138 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3139 && counter > 1)
3140 return;
3141
3142 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3143 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3144 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3145 msm_host->pm_qos_irq.latency);
3146}
3147
3148void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3149{
3150 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3151 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3152 int counter;
3153
3154 if (!msm_host->pm_qos_irq.enabled)
3155 return;
3156
Subhash Jadavani4d813902015-10-15 12:16:43 -07003157 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3158 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3159 } else {
3160 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3161 return;
Gilad Broner44445992015-09-29 16:05:39 +03003162 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003163
Gilad Broner44445992015-09-29 16:05:39 +03003164 if (counter)
3165 return;
3166
3167 if (async) {
3168 schedule_work(&msm_host->pm_qos_irq.unvote_work);
3169 return;
3170 }
3171
3172 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3173 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3174 msm_host->pm_qos_irq.latency);
3175}
3176
/*
 * sysfs "pm_qos_irq_status" read: report the IRQ PM QoS enable flag,
 * current voter count and active latency value.
 */
static ssize_t
sdhci_msm_pm_qos_irq_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;

	return snprintf(buf, PAGE_SIZE,
		"IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
		irq->enabled, atomic_read(&irq->counter), irq->latency);
}
3190
/* sysfs "pm_qos_irq_enable" read: report the enable flag as 0/1. */
static ssize_t
sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
}
3201
3202static ssize_t
3203sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3204 struct device_attribute *attr, const char *buf, size_t count)
3205{
3206 struct sdhci_host *host = dev_get_drvdata(dev);
3207 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3208 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3209 uint32_t value;
3210 bool enable;
3211 int ret;
3212
3213 ret = kstrtou32(buf, 0, &value);
3214 if (ret)
3215 goto out;
3216 enable = !!value;
3217
3218 if (enable == msm_host->pm_qos_irq.enabled)
3219 goto out;
3220
3221 msm_host->pm_qos_irq.enabled = enable;
3222 if (!enable) {
3223 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3224 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3225 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3226 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3227 msm_host->pm_qos_irq.latency);
3228 }
3229
3230out:
3231 return count;
3232}
3233
#ifdef CONFIG_SMP
/*
 * Record the host's interrupt number in the IRQ PM QoS request so the
 * QoS framework can associate the request with that IRQ line.
 */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
/* On non-SMP kernels there is no IRQ affinity to record. */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host) { }
#endif
3244
/*
 * One-time setup of the IRQ PM QoS request and its sysfs controls
 * ("pm_qos_irq_enable" and "pm_qos_irq_status").  Called once per
 * partition; the 'enabled' flag prevents repeat initialization.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	/* Nothing to do when the platform data carries no IRQ QoS config. */
	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
		msm_host->pdata->pm_qos_data.irq_req_type;
	/*
	 * Request types other than AFFINE_CORES/ALL_CORES get the IRQ
	 * number recorded; the core-affine types get the configured CPU mask.
	 */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs: creation failures are logged but not fatal. */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
		&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3304
3305static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3306 struct device_attribute *attr, char *buf)
3307{
3308 struct sdhci_host *host = dev_get_drvdata(dev);
3309 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3310 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3311 struct sdhci_msm_pm_qos_group *group;
3312 int i;
3313 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3314 int offset = 0;
3315
3316 for (i = 0; i < nr_groups; i++) {
3317 group = &msm_host->pm_qos[i];
3318 offset += snprintf(&buf[offset], PAGE_SIZE,
3319 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3320 i, group->req.cpus_affine.bits[0],
3321 msm_host->pm_qos_group_enable,
3322 atomic_read(&group->counter),
3323 group->latency);
3324 }
3325
3326 return offset;
3327}
3328
/* sysfs read: report whether per-CPU-group PM QoS voting is enabled. */
static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%s\n",
		msm_host->pm_qos_group_enable ? "enabled" : "disabled");
}
3339
3340static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3341 struct device_attribute *attr, const char *buf, size_t count)
3342{
3343 struct sdhci_host *host = dev_get_drvdata(dev);
3344 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3345 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3346 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3347 uint32_t value;
3348 bool enable;
3349 int ret;
3350 int i;
3351
3352 ret = kstrtou32(buf, 0, &value);
3353 if (ret)
3354 goto out;
3355 enable = !!value;
3356
3357 if (enable == msm_host->pm_qos_group_enable)
3358 goto out;
3359
3360 msm_host->pm_qos_group_enable = enable;
3361 if (!enable) {
3362 for (i = 0; i < nr_groups; i++) {
3363 cancel_work_sync(&msm_host->pm_qos[i].unvote_work);
3364 atomic_set(&msm_host->pm_qos[i].counter, 0);
3365 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3366 pm_qos_update_request(&msm_host->pm_qos[i].req,
3367 msm_host->pm_qos[i].latency);
3368 }
3369 }
3370
3371out:
3372 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003373}
3374
3375static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3376{
3377 int i;
3378 struct sdhci_msm_cpu_group_map *map =
3379 &msm_host->pdata->pm_qos_data.cpu_group_map;
3380
3381 if (cpu < 0)
3382 goto not_found;
3383
3384 for (i = 0; i < map->nr_groups; i++)
3385 if (cpumask_test_cpu(cpu, &map->mask[i]))
3386 return i;
3387
3388not_found:
3389 return -EINVAL;
3390}
3391
/*
 * Take (or refresh) the PM QoS latency vote for the CPU group that
 * contains @cpu, using the latency table entry matching the host's
 * current power policy.
 *
 * The per-group reference counter is incremented unconditionally (when
 * voting is enabled and the CPU maps to a group); the actual
 * pm_qos_update_request() is skipped when an identical vote is already
 * in place and this is not the first reference.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	/* Nothing to do if group voting is disabled or CPU is unmapped */
	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/*
	 * A deferred unvote may still be queued; flush it so it cannot
	 * overwrite the fresh vote below with PM_QOS_DEFAULT_VALUE.
	 */
	cancel_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3417
3418static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3419{
3420 struct sdhci_msm_pm_qos_group *group =
3421 container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
3422
3423 if (atomic_read(&group->counter))
3424 return;
3425
3426 group->latency = PM_QOS_DEFAULT_VALUE;
3427 pm_qos_update_request(&group->req, group->latency);
3428}
3429
Gilad Broner07d92eb2015-09-29 16:57:21 +03003430bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003431{
3432 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3433 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3434 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3435
3436 if (!msm_host->pm_qos_group_enable || group < 0 ||
3437 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003438 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003439
3440 if (async) {
3441 schedule_work(&msm_host->pm_qos[group].unvote_work);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003442 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003443 }
3444
3445 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3446 pm_qos_update_request(&msm_host->pm_qos[group].req,
3447 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003448 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003449}
3450
3451void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3452 struct sdhci_msm_pm_qos_latency *latency)
3453{
3454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3455 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3456 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3457 struct sdhci_msm_pm_qos_group *group;
3458 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003459 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003460
3461 if (msm_host->pm_qos_group_enable)
3462 return;
3463
3464 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3465 GFP_KERNEL);
3466 if (!msm_host->pm_qos)
3467 return;
3468
3469 for (i = 0; i < nr_groups; i++) {
3470 group = &msm_host->pm_qos[i];
3471 INIT_WORK(&group->unvote_work,
3472 sdhci_msm_pm_qos_cpu_unvote_work);
3473 atomic_set(&group->counter, 0);
3474 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3475 cpumask_copy(&group->req.cpus_affine,
3476 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3477 /* For initialization phase, set the performance mode latency */
3478 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3479 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3480 group->latency);
3481 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3482 __func__, i,
3483 group->req.cpus_affine.bits[0],
3484 group->latency,
3485 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3486 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003487 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003488 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003489
3490 /* sysfs */
3491 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3492 msm_host->pm_qos_group_status_attr.store = NULL;
3493 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3494 msm_host->pm_qos_group_status_attr.attr.name =
3495 "pm_qos_cpu_groups_status";
3496 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3497 ret = device_create_file(&msm_host->pdev->dev,
3498 &msm_host->pm_qos_group_status_attr);
3499 if (ret)
3500 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3501 __func__, ret);
3502 msm_host->pm_qos_group_enable_attr.show =
3503 sdhci_msm_pm_qos_group_enable_show;
3504 msm_host->pm_qos_group_enable_attr.store =
3505 sdhci_msm_pm_qos_group_enable_store;
3506 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3507 msm_host->pm_qos_group_enable_attr.attr.name =
3508 "pm_qos_cpu_groups_enable";
3509 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3510 ret = device_create_file(&msm_host->pdev->dev,
3511 &msm_host->pm_qos_group_enable_attr);
3512 if (ret)
3513 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3514 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003515}
3516
Gilad Broner07d92eb2015-09-29 16:57:21 +03003517static void sdhci_msm_pre_req(struct sdhci_host *host,
3518 struct mmc_request *mmc_req)
3519{
3520 int cpu;
3521 int group;
3522 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3523 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3524 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3525 msm_host->pm_qos_prev_cpu);
3526
3527 sdhci_msm_pm_qos_irq_vote(host);
3528
3529 cpu = get_cpu();
3530 put_cpu();
3531 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3532 if (group < 0)
3533 return;
3534
3535 if (group != prev_group && prev_group >= 0) {
3536 sdhci_msm_pm_qos_cpu_unvote(host,
3537 msm_host->pm_qos_prev_cpu, false);
3538 prev_group = -1; /* make sure to vote for new group */
3539 }
3540
3541 if (prev_group < 0) {
3542 sdhci_msm_pm_qos_cpu_vote(host,
3543 msm_host->pdata->pm_qos_data.latency, cpu);
3544 msm_host->pm_qos_prev_cpu = cpu;
3545 }
3546}
3547
3548static void sdhci_msm_post_req(struct sdhci_host *host,
3549 struct mmc_request *mmc_req)
3550{
3551 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3552 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3553
3554 sdhci_msm_pm_qos_irq_unvote(host, false);
3555
3556 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3557 msm_host->pm_qos_prev_cpu = -1;
3558}
3559
3560static void sdhci_msm_init(struct sdhci_host *host)
3561{
3562 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3563 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3564
3565 sdhci_msm_pm_qos_irq_init(host);
3566
3567 if (msm_host->pdata->pm_qos_data.legacy_valid)
3568 sdhci_msm_pm_qos_cpu_init(host,
3569 msm_host->pdata->pm_qos_data.latency);
3570}
3571
/*
 * MSM-specific host operation hooks wired into the core SDHCI driver.
 * Entries not set here fall back to the sdhci core defaults.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	/* Generic sdhci helpers are sufficient for bus width and reset */
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.detect = sdhci_msm_detect,
	.reset_workaround = sdhci_msm_reset_workaround,
	/* PM QoS voting hooks (see sdhci_msm_init/pre_req/post_req above) */
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
};
3595
/*
 * Adjust the advertised SDHCI capabilities and enable version-specific
 * quirks/workarounds based on the SDCC core major/minor revision read
 * from CORE_MCI_VERSION. The (possibly modified) capability word is
 * written to the vendor-specific capabilities override register and
 * cached in msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->pdata->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		/* Note: overrides the HW capability register directly */
		writel_relaxed(
			(readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
			caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3691
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Attach the command-queue (CMDQ) engine to this host. On success the
 * MMC_CAP2_CMD_QUEUE capability is advertised; on failure CMDQ support
 * is simply left disabled (host->cq_host cleared), which is not fatal.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (!IS_ERR(host->cq_host)) {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
		return;
	}

	dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
		PTR_ERR(host->cq_host));
	host->cq_host = NULL;
}
#else
/* Stub when the CMDQ host driver is not built in */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
}
#endif
3715
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003716static bool sdhci_msm_is_bootdevice(struct device *dev)
3717{
3718 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3719 strlen(saved_command_line))) {
3720 char search_string[50];
3721
3722 snprintf(search_string, ARRAY_SIZE(search_string),
3723 "androidboot.bootdevice=%s", dev_name(dev));
3724 if (strnstr(saved_command_line, search_string,
3725 strlen(saved_command_line)))
3726 return true;
3727 else
3728 return false;
3729 }
3730
3731 /*
3732 * "androidboot.bootdevice=" argument is not present then
3733 * return true as we don't know the boot device anyways.
3734 */
3735 return true;
3736}
3737
Asutosh Das0ef24812012-12-18 16:14:02 +05303738static int sdhci_msm_probe(struct platform_device *pdev)
3739{
3740 struct sdhci_host *host;
3741 struct sdhci_pltfm_host *pltfm_host;
3742 struct sdhci_msm_host *msm_host;
3743 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003744 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003745 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003746 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05303747 struct resource *tlmm_memres = NULL;
3748 void __iomem *tlmm_mem;
Asutosh Das0ef24812012-12-18 16:14:02 +05303749
3750 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3751 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3752 GFP_KERNEL);
3753 if (!msm_host) {
3754 ret = -ENOMEM;
3755 goto out;
3756 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303757
3758 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3759 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3760 if (IS_ERR(host)) {
3761 ret = PTR_ERR(host);
3762 goto out;
3763 }
3764
3765 pltfm_host = sdhci_priv(host);
3766 pltfm_host->priv = msm_host;
3767 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303768 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303769
3770 /* Extract platform data */
3771 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003772 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3773 if (ret < 0) {
3774 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3775 ret);
3776 goto pltfm_free;
3777 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003778
3779 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003780 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3781 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003782 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003783 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003784
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003785 if (disable_slots & (1 << (ret - 1))) {
3786 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3787 ret);
3788 ret = -ENODEV;
3789 goto pltfm_free;
3790 }
3791
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003792 if (ret <= 2) {
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003793 sdhci_slot[ret-1] = msm_host;
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003794 host->slot_no = ret;
3795 }
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003796
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003797 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3798 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303799 if (!msm_host->pdata) {
3800 dev_err(&pdev->dev, "DT parsing error\n");
3801 goto pltfm_free;
3802 }
3803 } else {
3804 dev_err(&pdev->dev, "No device tree node\n");
3805 goto pltfm_free;
3806 }
3807
3808 /* Setup Clocks */
3809
3810 /* Setup SDCC bus voter clock. */
3811 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3812 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3813 /* Vote for max. clk rate for max. performance */
3814 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3815 if (ret)
3816 goto pltfm_free;
3817 ret = clk_prepare_enable(msm_host->bus_clk);
3818 if (ret)
3819 goto pltfm_free;
3820 }
3821
3822 /* Setup main peripheral bus clock */
3823 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3824 if (!IS_ERR(msm_host->pclk)) {
3825 ret = clk_prepare_enable(msm_host->pclk);
3826 if (ret)
3827 goto bus_clk_disable;
3828 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303829 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303830
3831 /* Setup SDC MMC clock */
3832 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3833 if (IS_ERR(msm_host->clk)) {
3834 ret = PTR_ERR(msm_host->clk);
3835 goto pclk_disable;
3836 }
3837
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303838 /* Set to the minimum supported clock frequency */
3839 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3840 if (ret) {
3841 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303842 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303843 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303844 ret = clk_prepare_enable(msm_host->clk);
3845 if (ret)
3846 goto pclk_disable;
3847
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303848 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303849 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303850
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003851 /* Setup CDC calibration fixed feedback clock */
3852 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3853 if (!IS_ERR(msm_host->ff_clk)) {
3854 ret = clk_prepare_enable(msm_host->ff_clk);
3855 if (ret)
3856 goto clk_disable;
3857 }
3858
3859 /* Setup CDC calibration sleep clock */
3860 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3861 if (!IS_ERR(msm_host->sleep_clk)) {
3862 ret = clk_prepare_enable(msm_host->sleep_clk);
3863 if (ret)
3864 goto ff_clk_disable;
3865 }
3866
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003867 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3868
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303869 ret = sdhci_msm_bus_register(msm_host, pdev);
3870 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003871 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303872
3873 if (msm_host->msm_bus_vote.client_handle)
3874 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3875 sdhci_msm_bus_work);
3876 sdhci_msm_bus_voting(host, 1);
3877
Asutosh Das0ef24812012-12-18 16:14:02 +05303878 /* Setup regulators */
3879 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3880 if (ret) {
3881 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303882 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303883 }
3884
3885 /* Reset the core and Enable SDHC mode */
3886 core_memres = platform_get_resource_byname(pdev,
3887 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303888 if (!core_memres) {
3889 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3890 goto vreg_deinit;
3891 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303892 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3893 resource_size(core_memres));
3894
3895 if (!msm_host->core_mem) {
3896 dev_err(&pdev->dev, "Failed to remap registers\n");
3897 ret = -ENOMEM;
3898 goto vreg_deinit;
3899 }
3900
Sahitya Tummala079ed852015-10-29 20:18:45 +05303901 tlmm_memres = platform_get_resource_byname(pdev,
3902 IORESOURCE_MEM, "tlmm_mem");
3903 if (tlmm_memres) {
3904 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
3905 resource_size(tlmm_memres));
3906
3907 if (!tlmm_mem) {
3908 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
3909 ret = -ENOMEM;
3910 goto vreg_deinit;
3911 }
3912 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
3913 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
3914 &tlmm_memres->start, readl_relaxed(tlmm_mem));
3915 }
3916
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303917 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003918 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303919 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003920 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3921 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303922
Asutosh Das0ef24812012-12-18 16:14:02 +05303923 /* Set HC_MODE_EN bit in HC_MODE register */
3924 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3925
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003926 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3927 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3928 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3929
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303930 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003931
3932 /*
3933 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3934 * be used as required later on.
3935 */
3936 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3937 CORE_IO_PAD_PWR_SWITCH_EN),
3938 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303939 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303940 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3941 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3942 * interrupt in GIC (by registering the interrupt handler), we need to
3943 * ensure that any pending power irq interrupt status is acknowledged
3944 * otherwise power irq interrupt handler would be fired prematurely.
3945 */
3946 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3947 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3948 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3949 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3950 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3951 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3952 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3953 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003954
Subhash Jadavani28137342013-05-14 17:46:43 +05303955 /*
3956 * Ensure that above writes are propogated before interrupt enablement
3957 * in GIC.
3958 */
3959 mb();
3960
3961 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303962 * Following are the deviations from SDHC spec v3.0 -
3963 * 1. Card detection is handled using separate GPIO.
3964 * 2. Bus power control is handled by interacting with PMIC.
3965 */
3966 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
3967 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303968 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03003969 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303970 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05303971 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05303972 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05303973 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05303974
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05303975 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
3976 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
3977
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003978 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003979 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
3980 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
3981 SDHCI_VENDOR_VER_SHIFT));
3982 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
3983 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
3984 /*
3985 * Add 40us delay in interrupt handler when
3986 * operating at initialization frequency(400KHz).
3987 */
3988 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
3989 /*
3990 * Set Software Reset for DAT line in Software
3991 * Reset Register (Bit 2).
3992 */
3993 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
3994 }
3995
Asutosh Das214b9662013-06-13 14:27:42 +05303996 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
3997
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003998 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003999 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4000 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304001 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004002 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304003 goto vreg_deinit;
4004 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004005 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304006 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004007 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304008 if (ret) {
4009 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004010 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304011 goto vreg_deinit;
4012 }
4013
4014 /* Enable pwr irq interrupts */
4015 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
4016
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304017#ifdef CONFIG_MMC_CLKGATE
4018 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4019 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4020#endif
4021
Asutosh Das0ef24812012-12-18 16:14:02 +05304022 /* Set host capabilities */
4023 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4024 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004025 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304026 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304027 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004028 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
4029 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Subhash Jadavani6d472b22013-05-29 15:52:10 +05304030 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004031 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004032 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304033 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004034 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004035 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Asutosh Das0ef24812012-12-18 16:14:02 +05304036
4037 if (msm_host->pdata->nonremovable)
4038 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4039
Guoping Yuf7c91332014-08-20 16:56:18 +08004040 if (msm_host->pdata->nonhotplug)
4041 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4042
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304043 init_completion(&msm_host->pwr_irq_completion);
4044
Sahitya Tummala581df132013-03-12 14:57:46 +05304045 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304046 /*
4047 * Set up the card detect GPIO in active configuration before
4048 * configuring it as an IRQ. Otherwise, it can be in some
4049 * weird/inconsistent state resulting in flood of interrupts.
4050 */
4051 sdhci_msm_setup_pins(msm_host->pdata, true);
4052
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304053 /*
4054 * This delay is needed for stabilizing the card detect GPIO
4055 * line after changing the pull configs.
4056 */
4057 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304058 ret = mmc_gpio_request_cd(msm_host->mmc,
4059 msm_host->pdata->status_gpio, 0);
4060 if (ret) {
4061 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4062 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304063 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304064 }
4065 }
4066
Krishna Konda7feab352013-09-17 23:55:40 -07004067 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4068 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4069 host->dma_mask = DMA_BIT_MASK(64);
4070 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304071 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004072 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304073 host->dma_mask = DMA_BIT_MASK(32);
4074 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304075 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304076 } else {
4077 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4078 }
4079
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004080 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304081 ret = sdhci_add_host(host);
4082 if (ret) {
4083 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304084 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304085 }
4086
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004087 pm_runtime_set_active(&pdev->dev);
4088 pm_runtime_enable(&pdev->dev);
4089 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4090 pm_runtime_use_autosuspend(&pdev->dev);
4091
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304092 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4093 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4094 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4095 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4096 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4097 ret = device_create_file(&pdev->dev,
4098 &msm_host->msm_bus_vote.max_bus_bw);
4099 if (ret)
4100 goto remove_host;
4101
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304102 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4103 msm_host->polling.show = show_polling;
4104 msm_host->polling.store = store_polling;
4105 sysfs_attr_init(&msm_host->polling.attr);
4106 msm_host->polling.attr.name = "polling";
4107 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4108 ret = device_create_file(&pdev->dev, &msm_host->polling);
4109 if (ret)
4110 goto remove_max_bus_bw_file;
4111 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304112
4113 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4114 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4115 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4116 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4117 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4118 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4119 if (ret) {
4120 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4121 mmc_hostname(host->mmc), __func__, ret);
4122 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4123 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304124 /* Successful initialization */
4125 goto out;
4126
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304127remove_max_bus_bw_file:
4128 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304129remove_host:
4130 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004131 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304132 sdhci_remove_host(host, dead);
4133vreg_deinit:
4134 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304135bus_unregister:
4136 if (msm_host->msm_bus_vote.client_handle)
4137 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4138 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004139sleep_clk_disable:
4140 if (!IS_ERR(msm_host->sleep_clk))
4141 clk_disable_unprepare(msm_host->sleep_clk);
4142ff_clk_disable:
4143 if (!IS_ERR(msm_host->ff_clk))
4144 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304145clk_disable:
4146 if (!IS_ERR(msm_host->clk))
4147 clk_disable_unprepare(msm_host->clk);
4148pclk_disable:
4149 if (!IS_ERR(msm_host->pclk))
4150 clk_disable_unprepare(msm_host->pclk);
4151bus_clk_disable:
4152 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4153 clk_disable_unprepare(msm_host->bus_clk);
4154pltfm_free:
4155 sdhci_pltfm_free(pdev);
4156out:
4157 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4158 return ret;
4159}
4160
4161static int sdhci_msm_remove(struct platform_device *pdev)
4162{
4163 struct sdhci_host *host = platform_get_drvdata(pdev);
4164 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4165 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4166 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4167 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4168 0xffffffff);
4169
4170 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304171 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4172 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304173 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004174 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304175 sdhci_remove_host(host, dead);
4176 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304177
Asutosh Das0ef24812012-12-18 16:14:02 +05304178 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304179
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304180 sdhci_msm_setup_pins(pdata, true);
4181 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304182
4183 if (msm_host->msm_bus_vote.client_handle) {
4184 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4185 sdhci_msm_bus_unregister(msm_host);
4186 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304187 return 0;
4188}
4189
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004190#ifdef CONFIG_PM
4191static int sdhci_msm_runtime_suspend(struct device *dev)
4192{
4193 struct sdhci_host *host = dev_get_drvdata(dev);
4194 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4195 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004196 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004197
Pavan Anamula45ef1372015-10-29 23:22:12 +05304198 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4199 if (mmc_enable_qca6574_settings(host->mmc->card) ||
4200 mmc_enable_qca9377_settings(host->mmc->card))
4201 return 0;
4202 }
4203
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004204 disable_irq(host->irq);
4205 disable_irq(msm_host->pwr_irq);
4206
4207 /*
4208 * Remove the vote immediately only if clocks are off in which
4209 * case we might have queued work to remove vote but it may not
4210 * be completed before runtime suspend or system suspend.
4211 */
4212 if (!atomic_read(&msm_host->clks_on)) {
4213 if (msm_host->msm_bus_vote.client_handle)
4214 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4215 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004216 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4217 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004218
4219 return 0;
4220}
4221
4222static int sdhci_msm_runtime_resume(struct device *dev)
4223{
4224 struct sdhci_host *host = dev_get_drvdata(dev);
4225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4226 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004227 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004228
Pavan Anamula45ef1372015-10-29 23:22:12 +05304229 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4230 if (mmc_enable_qca6574_settings(host->mmc->card) ||
4231 mmc_enable_qca9377_settings(host->mmc->card))
4232 return 0;
4233 }
4234
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004235 enable_irq(msm_host->pwr_irq);
4236 enable_irq(host->irq);
4237
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004238 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4239 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004240 return 0;
4241}
4242
4243static int sdhci_msm_suspend(struct device *dev)
4244{
4245 struct sdhci_host *host = dev_get_drvdata(dev);
4246 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4247 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004248 int ret = 0;
4249 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004250
4251 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4252 (msm_host->mmc->slot.cd_irq >= 0))
4253 disable_irq(msm_host->mmc->slot.cd_irq);
4254
4255 if (pm_runtime_suspended(dev)) {
4256 pr_debug("%s: %s: already runtime suspended\n",
4257 mmc_hostname(host->mmc), __func__);
4258 goto out;
4259 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004260 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004261out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004262 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4263 ktime_to_us(ktime_sub(ktime_get(), start)));
4264 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004265}
4266
4267static int sdhci_msm_resume(struct device *dev)
4268{
4269 struct sdhci_host *host = dev_get_drvdata(dev);
4270 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4271 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4272 int ret = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004273 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004274
4275 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4276 (msm_host->mmc->slot.cd_irq >= 0))
4277 enable_irq(msm_host->mmc->slot.cd_irq);
4278
4279 if (pm_runtime_suspended(dev)) {
4280 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4281 mmc_hostname(host->mmc), __func__);
4282 goto out;
4283 }
4284
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004285 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004286out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004287 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4288 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004289 return ret;
4290}
4291
/*
 * PM callbacks: system sleep uses sdhci_msm_suspend/resume, runtime PM
 * uses sdhci_msm_runtime_suspend/resume (no runtime-idle callback).
 * Without CONFIG_PM the driver registers no PM ops at all.
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
#define SDHCI_MSM_PMOPS NULL
#endif
/* Devicetree match table: binds to nodes with compatible "qcom,sdhci-msm". */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4308
/*
 * Platform driver glue. .pm resolves to &sdhci_msm_pmops under CONFIG_PM
 * and to NULL otherwise (see SDHCI_MSM_PMOPS above).
 */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,
	},
};

/* Registers the driver at module init and unregisters at module exit. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");