blob: 3fa3cd378ee06d05482a9ccda90a45789cdfc0bd [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -08005 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080046#define CORE_POWER 0x0
47#define CORE_SW_RST (1 << 7)
48
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070049#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_MCI_DATA_CNT 0x30
51#define CORE_MCI_STATUS 0x34
52#define CORE_MCI_FIFO_CNT 0x44
53
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030060#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061
62#define CORE_GENERICS 0x70
63#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053064
65#define CORE_VERSION_MAJOR_MASK 0xF0000000
66#define CORE_VERSION_MAJOR_SHIFT 28
67
Asutosh Das0ef24812012-12-18 16:14:02 +053068#define CORE_HC_MODE 0x78
69#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070070#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053071
Sahitya Tummala67717bc2013-08-02 09:21:37 +053072#define CORE_MCI_VERSION 0x050
73#define CORE_TESTBUS_CONFIG 0x0CC
74#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080075#define CORE_TESTBUS_SEL2_BIT 4
76#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053077
Asutosh Das0ef24812012-12-18 16:14:02 +053078#define CORE_PWRCTL_STATUS 0xDC
79#define CORE_PWRCTL_MASK 0xE0
80#define CORE_PWRCTL_CLEAR 0xE4
81#define CORE_PWRCTL_CTL 0xE8
82
83#define CORE_PWRCTL_BUS_OFF 0x01
84#define CORE_PWRCTL_BUS_ON (1 << 1)
85#define CORE_PWRCTL_IO_LOW (1 << 2)
86#define CORE_PWRCTL_IO_HIGH (1 << 3)
87
88#define CORE_PWRCTL_BUS_SUCCESS 0x01
89#define CORE_PWRCTL_BUS_FAIL (1 << 1)
90#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
91#define CORE_PWRCTL_IO_FAIL (1 << 3)
92
93#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070094#define MAX_PHASES 16
95
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070096#define CORE_DLL_CONFIG 0x100
97#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070098#define CORE_DLL_EN (1 << 16)
99#define CORE_CDR_EN (1 << 17)
100#define CORE_CK_OUT_EN (1 << 18)
101#define CORE_CDR_EXT_EN (1 << 19)
102#define CORE_DLL_PDN (1 << 29)
103#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700104
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700105#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700106#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700107#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700108
109#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700110#define CORE_CLK_PWRSAVE (1 << 1)
111#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
112#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
113#define CORE_HC_MCLK_SEL_MASK (3 << 8)
114#define CORE_HC_AUTO_CMD21_EN (1 << 6)
115#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700116#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700117#define CORE_HC_SELECT_IN_EN (1 << 18)
118#define CORE_HC_SELECT_IN_HS400 (6 << 19)
119#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700120#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700121
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800122#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
124
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530125#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530126#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
127#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530128#define CORE_ONE_MID_EN (1 << 25)
129
Krishna Konda7feab352013-09-17 23:55:40 -0700130#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530131#define CORE_8_BIT_SUPPORT (1 << 18)
132#define CORE_3_3V_SUPPORT (1 << 24)
133#define CORE_3_0V_SUPPORT (1 << 25)
134#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300135#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700136
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800137#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530138
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700139#define CORE_CSR_CDC_CTLR_CFG0 0x130
140#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
141#define CORE_HW_AUTOCAL_ENA (1 << 17)
142
143#define CORE_CSR_CDC_CTLR_CFG1 0x134
144#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
145#define CORE_TIMER_ENA (1 << 16)
146
147#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
148#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
149#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
150#define CORE_CDC_OFFSET_CFG 0x14C
151#define CORE_CSR_CDC_DELAY_CFG 0x150
152#define CORE_CDC_SLAVE_DDA_CFG 0x160
153#define CORE_CSR_CDC_STATUS0 0x164
154#define CORE_CALIBRATION_DONE (1 << 0)
155
156#define CORE_CDC_ERROR_CODE_MASK 0x7000000
157
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300158#define CQ_CMD_DBG_RAM 0x110
159#define CQ_CMD_DBG_RAM_WA 0x150
160#define CQ_CMD_DBG_RAM_OL 0x154
161
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700162#define CORE_CSR_CDC_GEN_CFG 0x178
163#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
164#define CORE_CDC_SWITCH_RC_EN (1 << 1)
165
166#define CORE_DDR_200_CFG 0x184
167#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530168#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700169#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530170
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700171#define CORE_VENDOR_SPEC3 0x1B0
172#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530173#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700174
175#define CORE_DLL_CONFIG_2 0x1B4
176#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800177#define CORE_FLL_CYCLE_CNT (1 << 18)
178#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700179
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530180#define CORE_DDR_CONFIG 0x1B8
181#define DDR_CONFIG_POR_VAL 0x80040853
182#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
183#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700184#define CORE_DDR_CONFIG_2 0x1BC
185#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700186
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700187/* 512 descriptors */
188#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530189#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530190
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700191#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800192#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700193
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700194#define INVALID_TUNING_PHASE -1
195
Krishna Konda96e6b112013-10-28 15:25:03 -0700196#define NUM_TUNING_PHASES 16
197#define MAX_DRV_TYPES_SUPPORTED_HS200 3
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200198#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700199
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700200static const u32 tuning_block_64[] = {
201 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
202 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
203 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
204 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
205};
206
207static const u32 tuning_block_128[] = {
208 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
209 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
210 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
211 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
212 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
213 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
214 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
215 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
216};
Asutosh Das0ef24812012-12-18 16:14:02 +0530217
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -0700218/* global to hold each slot instance for debug */
219static struct sdhci_msm_host *sdhci_slot[2];
220
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700221static int disable_slots;
222/* root can write, others read */
223module_param(disable_slots, int, S_IRUGO|S_IWUSR);
224
Asutosh Das0ef24812012-12-18 16:14:02 +0530225enum vdd_io_level {
226 /* set vdd_io_data->low_vol_level */
227 VDD_IO_LOW,
228 /* set vdd_io_data->high_vol_level */
229 VDD_IO_HIGH,
230 /*
231 * set whatever there in voltage_level (third argument) of
232 * sdhci_msm_set_vdd_io_vol() function.
233 */
234 VDD_IO_SET_LEVEL,
235};
236
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700237/* MSM platform specific tuning */
238static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
239 u8 poll)
240{
241 int rc = 0;
242 u32 wait_cnt = 50;
243 u8 ck_out_en = 0;
244 struct mmc_host *mmc = host->mmc;
245
246 /* poll for CK_OUT_EN bit. max. poll time = 50us */
247 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
248 CORE_CK_OUT_EN);
249
250 while (ck_out_en != poll) {
251 if (--wait_cnt == 0) {
252 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
253 mmc_hostname(mmc), __func__, poll);
254 rc = -ETIMEDOUT;
255 goto out;
256 }
257 udelay(1);
258
259 ck_out_en = !!(readl_relaxed(host->ioaddr +
260 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
261 }
262out:
263 return rc;
264}
265
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530266/*
267 * Enable CDR to track changes of DAT lines and adjust sampling
268 * point according to voltage/temperature variations
269 */
270static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
271{
272 int rc = 0;
273 u32 config;
274
275 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
276 config |= CORE_CDR_EN;
277 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
278 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
279
280 rc = msm_dll_poll_ck_out_en(host, 0);
281 if (rc)
282 goto err;
283
284 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
285 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
286
287 rc = msm_dll_poll_ck_out_en(host, 1);
288 if (rc)
289 goto err;
290 goto out;
291err:
292 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
293out:
294 return rc;
295}
296
297static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
298 *attr, const char *buf, size_t count)
299{
300 struct sdhci_host *host = dev_get_drvdata(dev);
301 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
302 struct sdhci_msm_host *msm_host = pltfm_host->priv;
303 u32 tmp;
304 unsigned long flags;
305
306 if (!kstrtou32(buf, 0, &tmp)) {
307 spin_lock_irqsave(&host->lock, flags);
308 msm_host->en_auto_cmd21 = !!tmp;
309 spin_unlock_irqrestore(&host->lock, flags);
310 }
311 return count;
312}
313
314static ssize_t show_auto_cmd21(struct device *dev,
315 struct device_attribute *attr, char *buf)
316{
317 struct sdhci_host *host = dev_get_drvdata(dev);
318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
319 struct sdhci_msm_host *msm_host = pltfm_host->priv;
320
321 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
322}
323
324/* MSM auto-tuning handler */
325static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
326 bool enable,
327 u32 type)
328{
329 int rc = 0;
330 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
331 struct sdhci_msm_host *msm_host = pltfm_host->priv;
332 u32 val = 0;
333
334 if (!msm_host->en_auto_cmd21)
335 return 0;
336
337 if (type == MMC_SEND_TUNING_BLOCK_HS200)
338 val = CORE_HC_AUTO_CMD21_EN;
339 else
340 return 0;
341
342 if (enable) {
343 rc = msm_enable_cdr_cm_sdc4_dll(host);
344 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
345 val, host->ioaddr + CORE_VENDOR_SPEC);
346 } else {
347 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
348 ~val, host->ioaddr + CORE_VENDOR_SPEC);
349 }
350 return rc;
351}
352
/*
 * Program one of the 16 DLL clock output phases into CORE_DLL_CONFIG.
 *
 * The requested @phase (0..15) is translated through a grey-coded
 * table and written to the CDR_SELEXT field (bits 23:20). The whole
 * sequence runs under the host lock and follows the hardware-mandated
 * order: disable CK_OUT, wait for it to drop, select the phase,
 * re-enable CK_OUT, wait for it to latch, then hand control back to
 * the CDR. Returns 0 on success or -ETIMEDOUT from the polls.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	/* Grey-coded encoding of phases 0..15 as the CDR_SELEXT field expects. */
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	/* Take manual control: CDR off, CK_OUT off, external CDR + DLL on. */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Hand sampling control back to the CDR with the new phase in place. */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
408
409/*
410 * Find out the greatest range of consecuitive selected
411 * DLL clock output phases that can be used as sampling
412 * setting for SD3.0 UHS-I card read operation (in SDR104
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700413 * timing mode) or for eMMC4.5 card read operation (in
414 * HS400/HS200 timing mode).
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700415 * Select the 3/4 of the range and configure the DLL with the
416 * selected DLL clock output phase.
417 */
418
/*
 * Pick the best DLL phase from @phase_table, a sorted list of
 * @total_phases phases that passed tuning.
 *
 * The list is partitioned into rows of consecutive phases (windows).
 * If one window ends at phase 15 and another starts at phase 0 the
 * two wrap around and are merged into a single window. The widest
 * window wins, and the phase at the 3/4 point of that window is
 * returned (as an int); -EINVAL on bad input or internal overflow.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row] holds one window of consecutive phases. */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	/* phases_per_row[row] = number of valid entries in ranges[row]. */
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split the flat phase list into rows of consecutive phases. */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		/* The phase-0 window now lives inside the phase-15 window. */
		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Select the widest window. */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Take the phase at the 3/4 point of the winning window. */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
522
523static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
524{
525 u32 mclk_freq = 0;
526
527 /* Program the MCLK value to MCLK_FREQ bit field */
528 if (host->clock <= 112000000)
529 mclk_freq = 0;
530 else if (host->clock <= 125000000)
531 mclk_freq = 1;
532 else if (host->clock <= 137000000)
533 mclk_freq = 2;
534 else if (host->clock <= 150000000)
535 mclk_freq = 3;
536 else if (host->clock <= 162000000)
537 mclk_freq = 4;
538 else if (host->clock <= 175000000)
539 mclk_freq = 5;
540 else if (host->clock <= 187000000)
541 mclk_freq = 6;
542 else if (host->clock <= 200000000)
543 mclk_freq = 7;
544
545 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
546 & ~(7 << 24)) | (mclk_freq << 24)),
547 host->ioaddr + CORE_DLL_CONFIG);
548}
549
/*
 * Initialize the DLL (programmable delay line).
 *
 * Runs the full DLL reset/bring-up sequence under the host lock:
 * PWRSAVE is temporarily forced off so the clock stays running,
 * the DLL is reset and powered down, the MCLK frequency code is
 * programmed, the DLL is released and enabled, and finally the
 * function polls (up to 50us) for DLL_LOCK. On controllers with the
 * updated DLL reset scheme (use_updated_dll_reset) the DLL clock is
 * additionally gated around the reset and an FLL cycle count derived
 * from host->clock / TCXO_FREQ is programmed into CORE_DLL_CONFIG_2.
 * The statement order is hardware-mandated; do not reorder.
 *
 * Returns 0 on success or -ETIMEDOUT if the DLL never locks. The
 * original PWRSAVE state is restored on every exit path.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				& ~CORE_CK_OUT_EN),
				host->ioaddr + CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				| CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count: 8 cycles per TCXO period if
		 * FLL_CYCLE_CNT is set, otherwise 4. Programmed into
		 * bits 17:10 of CORE_DLL_CONFIG_2.
		 */
		if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
					& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
671
/*
 * Run CDCLP533 (calibrating delay circuit) calibration for HS400.
 *
 * Sequence: configure the CDC switch, quiesce CDC traffic, load the
 * fixed CDC register initialization values, trigger a SW full
 * calibration pulse, enable HW auto-calibration and its timer, then
 * poll (up to 50us) for CALIBRATION_DONE and check the error code.
 * On success, CDC traffic is restarted. The register write order is
 * hardware-mandated; do not reorder.
 *
 * Returns 0 on success, -ETIMEDOUT if calibration never completes,
 * or -EINVAL if the CDC reports an error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all writes above have reached the device before polling. */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
777
/*
 * Run the CM_DLL_SDC4 (DDR DLL) calibration used for HS400.
 *
 * Reprograms the DDR_CONFIG power-on value (bootloaders may have
 * modified it), optionally enables CMDIN_RCLK for enhanced strobe,
 * starts DDR calibration via DDR_CAL_EN, and polls (up to 1ms) for
 * DDR_DLL_LOCK. On success, PWRSAVE_DLL is enabled unless the
 * controller uses the 14lpp DLL, which cannot meet the MCLK gating
 * timing that PWRSAVE_DLL requires (see comment below).
 *
 * Returns 0 on success or -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
			host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* Force PRG_RCLK_DLY to the driver's value (115). */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	/* Route RCLK through the CMD line when enhanced strobe is in use. */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Ensure the register writes above have reached the device. */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}
839
Ritesh Harjaniea709662015-05-27 15:40:24 +0530840static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
841{
842 int ret = 0;
843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
844 struct sdhci_msm_host *msm_host = pltfm_host->priv;
845 struct mmc_host *mmc = host->mmc;
846
847 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
848
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530849 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
850 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530851 mmc_hostname(mmc));
852 return -EINVAL;
853 }
854
855 if (msm_host->calibration_done ||
856 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
857 return 0;
858 }
859
860 /*
861 * Reset the tuning block.
862 */
863 ret = msm_init_cm_dll(host);
864 if (ret)
865 goto out;
866
867 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
868out:
869 if (!ret)
870 msm_host->calibration_done = true;
871 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
872 __func__, ret);
873 return ret;
874}
875
/*
 * sdhci_msm_hs400_dll_calibration() - restore DLL state for HS400 operation.
 * @host: the SDHCI host.
 *
 * HS400 cannot be retuned directly, so this resets the tuning block,
 * re-applies the tuning phase saved by sdhci_msm_execute_tuning(), then
 * runs the appropriate DLL HW calibration (CDCLP533 or CM_DLL_SDC4
 * depending on the controller variant).
 *
 * Returns 0 on success or a negative error from any step.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Pick the calibration engine this controller variant provides. */
	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
913
Krishna Konda96e6b112013-10-28 15:25:03 -0700914static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
915 u8 drv_type)
916{
917 struct mmc_command cmd = {0};
918 struct mmc_request mrq = {NULL};
919 struct mmc_host *mmc = host->mmc;
920 u8 val = ((drv_type << 4) | 2);
921
922 cmd.opcode = MMC_SWITCH;
923 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
924 (EXT_CSD_HS_TIMING << 16) |
925 (val << 8) |
926 EXT_CSD_CMD_SET_NORMAL;
927 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
928 /* 1 sec */
929 cmd.busy_timeout = 1000 * 1000;
930
931 memset(cmd.resp, 0, sizeof(cmd.resp));
932 cmd.retries = 3;
933
934 mrq.cmd = &cmd;
935 cmd.data = NULL;
936
937 mmc_wait_for_req(mmc, &mrq);
938 pr_debug("%s: %s: set card drive type to %d\n",
939 mmc_hostname(mmc), __func__,
940 drv_type);
941}
942
/*
 * sdhci_msm_execute_tuning() - find and program the best CM DLL phase.
 * @host:   the SDHCI host.
 * @opcode: tuning command opcode (e.g. MMC_SEND_TUNING_BLOCK_HS200).
 *
 * Sends the tuning block at each of the 16 DLL phases, collects the phases
 * whose read-back matches the expected pattern, and programs the most
 * appropriate one.  If every phase passes on an eMMC card, the card's
 * drive strength is varied and tuning repeated so that a discriminating
 * phase window can be found.  The whole sequence is retried up to three
 * times before giving up with -EIO.
 *
 * On the HS400 path this instead performs only the DLL calibration,
 * restoring the phase saved by a previous successful tuning run.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when no
 * working phase exists, or a negative error from the DLL helpers.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the wider 128-byte tuning pattern. */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	/* Fresh pass: forget phases recorded by a previous attempt. */
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed transfer, wait (via CMD13 polling) for the
		 * card to return to TRAN state before trying the next
		 * phase, so a stuck card does not poison later attempts.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		/* Choose the middle of the widest passing window. */
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remember the phase for HS400 DLL re-calibration later. */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1147
Asutosh Das0ef24812012-12-18 16:14:02 +05301148static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1149{
1150 struct sdhci_msm_gpio_data *curr;
1151 int i, ret = 0;
1152
1153 curr = pdata->pin_data->gpio_data;
1154 for (i = 0; i < curr->size; i++) {
1155 if (!gpio_is_valid(curr->gpio[i].no)) {
1156 ret = -EINVAL;
1157 pr_err("%s: Invalid gpio = %d\n", __func__,
1158 curr->gpio[i].no);
1159 goto free_gpios;
1160 }
1161 if (enable) {
1162 ret = gpio_request(curr->gpio[i].no,
1163 curr->gpio[i].name);
1164 if (ret) {
1165 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1166 __func__, curr->gpio[i].no,
1167 curr->gpio[i].name, ret);
1168 goto free_gpios;
1169 }
1170 curr->gpio[i].is_enabled = true;
1171 } else {
1172 gpio_free(curr->gpio[i].no);
1173 curr->gpio[i].is_enabled = false;
1174 }
1175 }
1176 return ret;
1177
1178free_gpios:
1179 for (i--; i >= 0; i--) {
1180 gpio_free(curr->gpio[i].no);
1181 curr->gpio[i].is_enabled = false;
1182 }
1183 return ret;
1184}
1185
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301186static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1187 bool enable)
1188{
1189 int ret = 0;
1190
1191 if (enable)
1192 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1193 pdata->pctrl_data->pins_active);
1194 else
1195 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1196 pdata->pctrl_data->pins_sleep);
1197
1198 if (ret < 0)
1199 pr_err("%s state for pinctrl failed with %d\n",
1200 enable ? "Enabling" : "Disabling", ret);
1201
1202 return ret;
1203}
1204
Asutosh Das0ef24812012-12-18 16:14:02 +05301205static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1206{
1207 int ret = 0;
1208
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301209 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301210 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301211 } else if (pdata->pctrl_data) {
1212 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1213 goto out;
1214 } else if (!pdata->pin_data) {
1215 return 0;
1216 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301217
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301218 if (pdata->pin_data->is_gpio)
1219 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301220out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301221 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301222 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301223
1224 return ret;
1225}
1226
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301227static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1228 u32 **out, int *len, u32 size)
1229{
1230 int ret = 0;
1231 struct device_node *np = dev->of_node;
1232 size_t sz;
1233 u32 *arr = NULL;
1234
1235 if (!of_get_property(np, prop_name, len)) {
1236 ret = -EINVAL;
1237 goto out;
1238 }
1239 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001240 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301241 dev_err(dev, "%s invalid size\n", prop_name);
1242 ret = -EINVAL;
1243 goto out;
1244 }
1245
1246 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1247 if (!arr) {
1248 dev_err(dev, "%s failed allocating memory\n", prop_name);
1249 ret = -ENOMEM;
1250 goto out;
1251 }
1252
1253 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1254 if (ret < 0) {
1255 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1256 goto out;
1257 }
1258 *out = arr;
1259out:
1260 if (ret)
1261 *len = 0;
1262 return ret;
1263}
1264
Asutosh Das0ef24812012-12-18 16:14:02 +05301265#define MAX_PROP_SIZE 32
1266static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1267 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1268{
1269 int len, ret = 0;
1270 const __be32 *prop;
1271 char prop_name[MAX_PROP_SIZE];
1272 struct sdhci_msm_reg_data *vreg;
1273 struct device_node *np = dev->of_node;
1274
1275 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1276 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301277 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301278 return ret;
1279 }
1280
1281 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1282 if (!vreg) {
1283 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1284 ret = -ENOMEM;
1285 return ret;
1286 }
1287
1288 vreg->name = vreg_name;
1289
1290 snprintf(prop_name, MAX_PROP_SIZE,
1291 "qcom,%s-always-on", vreg_name);
1292 if (of_get_property(np, prop_name, NULL))
1293 vreg->is_always_on = true;
1294
1295 snprintf(prop_name, MAX_PROP_SIZE,
1296 "qcom,%s-lpm-sup", vreg_name);
1297 if (of_get_property(np, prop_name, NULL))
1298 vreg->lpm_sup = true;
1299
1300 snprintf(prop_name, MAX_PROP_SIZE,
1301 "qcom,%s-voltage-level", vreg_name);
1302 prop = of_get_property(np, prop_name, &len);
1303 if (!prop || (len != (2 * sizeof(__be32)))) {
1304 dev_warn(dev, "%s %s property\n",
1305 prop ? "invalid format" : "no", prop_name);
1306 } else {
1307 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1308 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1309 }
1310
1311 snprintf(prop_name, MAX_PROP_SIZE,
1312 "qcom,%s-current-level", vreg_name);
1313 prop = of_get_property(np, prop_name, &len);
1314 if (!prop || (len != (2 * sizeof(__be32)))) {
1315 dev_warn(dev, "%s %s property\n",
1316 prop ? "invalid format" : "no", prop_name);
1317 } else {
1318 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1319 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1320 }
1321
1322 *vreg_data = vreg;
1323 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1324 vreg->name, vreg->is_always_on ? "always_on," : "",
1325 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1326 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1327
1328 return ret;
1329}
1330
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301331static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1332 struct sdhci_msm_pltfm_data *pdata)
1333{
1334 struct sdhci_pinctrl_data *pctrl_data;
1335 struct pinctrl *pctrl;
1336 int ret = 0;
1337
1338 /* Try to obtain pinctrl handle */
1339 pctrl = devm_pinctrl_get(dev);
1340 if (IS_ERR(pctrl)) {
1341 ret = PTR_ERR(pctrl);
1342 goto out;
1343 }
1344 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1345 if (!pctrl_data) {
1346 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1347 ret = -ENOMEM;
1348 goto out;
1349 }
1350 pctrl_data->pctrl = pctrl;
1351 /* Look-up and keep the states handy to be used later */
1352 pctrl_data->pins_active = pinctrl_lookup_state(
1353 pctrl_data->pctrl, "active");
1354 if (IS_ERR(pctrl_data->pins_active)) {
1355 ret = PTR_ERR(pctrl_data->pins_active);
1356 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1357 goto out;
1358 }
1359 pctrl_data->pins_sleep = pinctrl_lookup_state(
1360 pctrl_data->pctrl, "sleep");
1361 if (IS_ERR(pctrl_data->pins_sleep)) {
1362 ret = PTR_ERR(pctrl_data->pins_sleep);
1363 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1364 goto out;
1365 }
1366 pdata->pctrl_data = pctrl_data;
1367out:
1368 return ret;
1369}
1370
Asutosh Das0ef24812012-12-18 16:14:02 +05301371#define GPIO_NAME_MAX_LEN 32
1372static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1373 struct sdhci_msm_pltfm_data *pdata)
1374{
1375 int ret = 0, cnt, i;
1376 struct sdhci_msm_pin_data *pin_data;
1377 struct device_node *np = dev->of_node;
1378
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301379 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1380 if (!ret) {
1381 goto out;
1382 } else if (ret == -EPROBE_DEFER) {
1383 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1384 goto out;
1385 } else {
1386 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1387 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301388 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301389 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301390 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1391 if (!pin_data) {
1392 dev_err(dev, "No memory for pin_data\n");
1393 ret = -ENOMEM;
1394 goto out;
1395 }
1396
1397 cnt = of_gpio_count(np);
1398 if (cnt > 0) {
1399 pin_data->gpio_data = devm_kzalloc(dev,
1400 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1401 if (!pin_data->gpio_data) {
1402 dev_err(dev, "No memory for gpio_data\n");
1403 ret = -ENOMEM;
1404 goto out;
1405 }
1406 pin_data->gpio_data->size = cnt;
1407 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1408 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1409
1410 if (!pin_data->gpio_data->gpio) {
1411 dev_err(dev, "No memory for gpio\n");
1412 ret = -ENOMEM;
1413 goto out;
1414 }
1415
1416 for (i = 0; i < cnt; i++) {
1417 const char *name = NULL;
1418 char result[GPIO_NAME_MAX_LEN];
1419 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1420 of_property_read_string_index(np,
1421 "qcom,gpio-names", i, &name);
1422
1423 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1424 dev_name(dev), name ? name : "?");
1425 pin_data->gpio_data->gpio[i].name = result;
1426 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1427 pin_data->gpio_data->gpio[i].name,
1428 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301429 }
1430 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301431 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301432out:
1433 if (ret)
1434 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1435 return ret;
1436}
1437
Gilad Bronerc788a672015-09-08 15:39:11 +03001438static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1439 struct sdhci_msm_pltfm_data *pdata)
1440{
1441 struct device_node *np = dev->of_node;
1442 const char *str;
1443 u32 cpu;
1444 int ret = 0;
1445 int i;
1446
1447 pdata->pm_qos_data.irq_valid = false;
1448 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1449 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1450 !strcmp(str, "affine_irq")) {
1451 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1452 }
1453
1454 /* must specify cpu for "affine_cores" type */
1455 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1456 pdata->pm_qos_data.irq_cpu = -1;
1457 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1458 if (ret) {
1459 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1460 ret);
1461 goto out;
1462 }
1463 if (cpu < 0 || cpu >= num_possible_cpus()) {
1464 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1465 __func__, cpu, num_possible_cpus());
1466 ret = -EINVAL;
1467 goto out;
1468 }
1469 pdata->pm_qos_data.irq_cpu = cpu;
1470 }
1471
1472 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1473 SDHCI_POWER_POLICY_NUM) {
1474 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1475 __func__, SDHCI_POWER_POLICY_NUM);
1476 ret = -EINVAL;
1477 goto out;
1478 }
1479
1480 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1481 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1482 &pdata->pm_qos_data.irq_latency.latency[i]);
1483
1484 pdata->pm_qos_data.irq_valid = true;
1485out:
1486 return ret;
1487}
1488
1489static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1490 struct sdhci_msm_pltfm_data *pdata)
1491{
1492 struct device_node *np = dev->of_node;
1493 u32 mask;
1494 int nr_groups;
1495 int ret;
1496 int i;
1497
1498 /* Read cpu group mapping */
1499 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1500 if (nr_groups <= 0) {
1501 ret = -EINVAL;
1502 goto out;
1503 }
1504 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1505 pdata->pm_qos_data.cpu_group_map.mask =
1506 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1507 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1508 ret = -ENOMEM;
1509 goto out;
1510 }
1511
1512 for (i = 0; i < nr_groups; i++) {
1513 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1514 i, &mask);
1515
1516 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1517 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1518 cpu_possible_mask)) {
1519 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1520 __func__, mask, i);
1521 ret = -EINVAL;
1522 goto free_res;
1523 }
1524 }
1525 return 0;
1526
1527free_res:
1528 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1529out:
1530 return ret;
1531}
1532
1533static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1534 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1535{
1536 struct device_node *np = dev->of_node;
1537 struct sdhci_msm_pm_qos_latency *values;
1538 int ret;
1539 int i;
1540 int group;
1541 int cfg;
1542
1543 ret = of_property_count_u32_elems(np, name);
1544 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1545 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1546 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1547 ret);
1548 return -EINVAL;
1549 } else if (ret < 0) {
1550 return ret;
1551 }
1552
1553 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1554 GFP_KERNEL);
1555 if (!values)
1556 return -ENOMEM;
1557
1558 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1559 group = i / SDHCI_POWER_POLICY_NUM;
1560 cfg = i % SDHCI_POWER_POLICY_NUM;
1561 of_property_read_u32_index(np, name, i,
1562 &(values[group].latency[cfg]));
1563 }
1564
1565 *latency = values;
1566 return 0;
1567}
1568
1569static void sdhci_msm_pm_qos_parse(struct device *dev,
1570 struct sdhci_msm_pltfm_data *pdata)
1571{
1572 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1573 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1574 __func__);
1575
1576 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1577 pdata->pm_qos_data.cmdq_valid =
1578 !sdhci_msm_pm_qos_parse_latency(dev,
1579 "qcom,pm-qos-cmdq-latency-us",
1580 pdata->pm_qos_data.cpu_group_map.nr_groups,
1581 &pdata->pm_qos_data.cmdq_latency);
1582 pdata->pm_qos_data.legacy_valid =
1583 !sdhci_msm_pm_qos_parse_latency(dev,
1584 "qcom,pm-qos-legacy-latency-us",
1585 pdata->pm_qos_data.cpu_group_map.nr_groups,
1586 &pdata->pm_qos_data.latency);
1587 if (!pdata->pm_qos_data.cmdq_valid &&
1588 !pdata->pm_qos_data.legacy_valid) {
1589 /* clean-up previously allocated arrays */
1590 kfree(pdata->pm_qos_data.latency);
1591 kfree(pdata->pm_qos_data.cmdq_latency);
1592 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1593 __func__);
1594 }
1595 } else {
1596 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1597 __func__);
1598 }
1599}
1600
Asutosh Das0ef24812012-12-18 16:14:02 +05301601/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001602static
1603struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1604 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301605{
1606 struct sdhci_msm_pltfm_data *pdata = NULL;
1607 struct device_node *np = dev->of_node;
1608 u32 bus_width = 0;
1609 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301610 int clk_table_len;
1611 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301612 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301613
1614 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1615 if (!pdata) {
1616 dev_err(dev, "failed to allocate memory for platform data\n");
1617 goto out;
1618 }
1619
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301620 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1621 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1622 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301623
Asutosh Das0ef24812012-12-18 16:14:02 +05301624 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1625 if (bus_width == 8)
1626 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1627 else if (bus_width == 4)
1628 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1629 else {
1630 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1631 pdata->mmc_bus_width = 0;
1632 }
1633
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001634 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1635 &msm_host->mmc->clk_scaling.freq_table,
1636 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1637 pr_debug("%s: no clock scaling frequencies were supplied\n",
1638 dev_name(dev));
1639 else if (!msm_host->mmc->clk_scaling.freq_table ||
1640 !msm_host->mmc->clk_scaling.freq_table_sz)
1641 dev_err(dev, "bad dts clock scaling frequencies\n");
1642
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301643 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1644 &clk_table, &clk_table_len, 0)) {
1645 dev_err(dev, "failed parsing supported clock rates\n");
1646 goto out;
1647 }
1648 if (!clk_table || !clk_table_len) {
1649 dev_err(dev, "Invalid clock table\n");
1650 goto out;
1651 }
1652 pdata->sup_clk_table = clk_table;
1653 pdata->sup_clk_cnt = clk_table_len;
1654
Asutosh Das0ef24812012-12-18 16:14:02 +05301655 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1656 sdhci_msm_slot_reg_data),
1657 GFP_KERNEL);
1658 if (!pdata->vreg_data) {
1659 dev_err(dev, "failed to allocate memory for vreg data\n");
1660 goto out;
1661 }
1662
1663 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1664 "vdd")) {
1665 dev_err(dev, "failed parsing vdd data\n");
1666 goto out;
1667 }
1668 if (sdhci_msm_dt_parse_vreg_info(dev,
1669 &pdata->vreg_data->vdd_io_data,
1670 "vdd-io")) {
1671 dev_err(dev, "failed parsing vdd-io data\n");
1672 goto out;
1673 }
1674
1675 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1676 dev_err(dev, "failed parsing gpio data\n");
1677 goto out;
1678 }
1679
Asutosh Das0ef24812012-12-18 16:14:02 +05301680 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1681
1682 for (i = 0; i < len; i++) {
1683 const char *name = NULL;
1684
1685 of_property_read_string_index(np,
1686 "qcom,bus-speed-mode", i, &name);
1687 if (!name)
1688 continue;
1689
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001690 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1691 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1692 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1693 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1694 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301695 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1696 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1697 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1698 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1699 pdata->caps |= MMC_CAP_1_8V_DDR
1700 | MMC_CAP_UHS_DDR50;
1701 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1702 pdata->caps |= MMC_CAP_1_2V_DDR
1703 | MMC_CAP_UHS_DDR50;
1704 }
1705
1706 if (of_get_property(np, "qcom,nonremovable", NULL))
1707 pdata->nonremovable = true;
1708
Guoping Yuf7c91332014-08-20 16:56:18 +08001709 if (of_get_property(np, "qcom,nonhotplug", NULL))
1710 pdata->nonhotplug = true;
1711
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001712 pdata->largeaddressbus =
1713 of_property_read_bool(np, "qcom,large-address-bus");
1714
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001715 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1716 msm_host->mmc->wakeup_on_idle = true;
1717
Gilad Bronerc788a672015-09-08 15:39:11 +03001718 sdhci_msm_pm_qos_parse(dev, pdata);
1719
Asutosh Das0ef24812012-12-18 16:14:02 +05301720 return pdata;
1721out:
1722 return NULL;
1723}
1724
/*
 * Returns the required bus bandwidth in Bytes per Sec, derived from the
 * current controller clock rate scaled by the configured bus width.
 */
static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
					struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned int bw;

	bw = msm_host->clk_rate;
	/*
	 * Scale by bus width relative to an 8-bit bus (one byte per clock):
	 * a 4-bit bus moves half as much data per clock, a 1-bit bus an
	 * eighth.
	 * NOTE(review): an earlier comment here referred to the DDR-mode
	 * controller clock running at double rate, which does not match
	 * the bus-width scaling below — confirm the intended model.
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		bw /= 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_1)
		bw /= 8;

	return bw;
}
1746
1747static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1748 unsigned int bw)
1749{
1750 unsigned int *table = host->pdata->voting_data->bw_vecs;
1751 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1752 int i;
1753
1754 if (host->msm_bus_vote.is_max_bw_needed && bw)
1755 return host->msm_bus_vote.max_bw_vote;
1756
1757 for (i = 0; i < size; i++) {
1758 if (bw <= table[i])
1759 break;
1760 }
1761
1762 if (i && (i == size))
1763 i--;
1764
1765 return i;
1766}
1767
/*
 * Apply a bus-scale vote if it differs from the current one.
 *
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * Note: msm_bus_scale_client_update_request() may sleep, so the host
 * lock is dropped around the call and re-acquired afterwards; *flags
 * is updated accordingly for the caller's final unlock.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/* Drop the spinlock across the (potentially sleeping) bus call. */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Cache the vote only after the request succeeded. */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
1798
1799/*
1800 * Internal work. Work to set 0 bandwidth for msm bus.
1801 */
1802static void sdhci_msm_bus_work(struct work_struct *work)
1803{
1804 struct sdhci_msm_host *msm_host;
1805 struct sdhci_host *host;
1806 unsigned long flags;
1807
1808 msm_host = container_of(work, struct sdhci_msm_host,
1809 msm_bus_vote.vote_work.work);
1810 host = platform_get_drvdata(msm_host->pdev);
1811
1812 if (!msm_host->msm_bus_vote.client_handle)
1813 return;
1814
1815 spin_lock_irqsave(&host->lock, flags);
1816 /* don't vote for 0 bandwidth if any request is in progress */
1817 if (!host->mrq) {
1818 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301819 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301820 } else
1821 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1822 mmc_hostname(host->mmc), __func__);
1823 spin_unlock_irqrestore(&host->lock, flags);
1824}
1825
1826/*
1827 * This function cancels any scheduled delayed work and sets the bus
1828 * vote based on bw (bandwidth) argument.
1829 */
1830static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
1831 unsigned int bw)
1832{
1833 int vote;
1834 unsigned long flags;
1835 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1836 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1837
1838 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
1839 spin_lock_irqsave(&host->lock, flags);
1840 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301841 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301842 spin_unlock_irqrestore(&host->lock, flags);
1843}
1844
1845#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1846
1847/* This function queues a work which will set the bandwidth requiement to 0 */
1848static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
1849{
1850 unsigned long flags;
1851 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1852 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1853
1854 spin_lock_irqsave(&host->lock, flags);
1855 if (msm_host->msm_bus_vote.min_bw_vote !=
1856 msm_host->msm_bus_vote.curr_vote)
1857 queue_delayed_work(system_wq,
1858 &msm_host->msm_bus_vote.vote_work,
1859 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
1860 spin_unlock_irqrestore(&host->lock, flags);
1861}
1862
1863static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
1864 struct platform_device *pdev)
1865{
1866 int rc = 0;
1867 struct msm_bus_scale_pdata *bus_pdata;
1868
1869 struct sdhci_msm_bus_voting_data *data;
1870 struct device *dev = &pdev->dev;
1871
1872 data = devm_kzalloc(dev,
1873 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
1874 if (!data) {
1875 dev_err(&pdev->dev,
1876 "%s: failed to allocate memory\n", __func__);
1877 rc = -ENOMEM;
1878 goto out;
1879 }
1880 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
1881 if (data->bus_pdata) {
1882 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
1883 &data->bw_vecs, &data->bw_vecs_size, 0);
1884 if (rc) {
1885 dev_err(&pdev->dev,
1886 "%s: Failed to get bus-bw-vectors-bps\n",
1887 __func__);
1888 goto out;
1889 }
1890 host->pdata->voting_data = data;
1891 }
1892 if (host->pdata->voting_data &&
1893 host->pdata->voting_data->bus_pdata &&
1894 host->pdata->voting_data->bw_vecs &&
1895 host->pdata->voting_data->bw_vecs_size) {
1896
1897 bus_pdata = host->pdata->voting_data->bus_pdata;
1898 host->msm_bus_vote.client_handle =
1899 msm_bus_scale_register_client(bus_pdata);
1900 if (!host->msm_bus_vote.client_handle) {
1901 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
1902 rc = -EFAULT;
1903 goto out;
1904 }
1905 /* cache the vote index for minimum and maximum bandwidth */
1906 host->msm_bus_vote.min_bw_vote =
1907 sdhci_msm_bus_get_vote_for_bw(host, 0);
1908 host->msm_bus_vote.max_bw_vote =
1909 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
1910 } else {
1911 devm_kfree(dev, data);
1912 }
1913
1914out:
1915 return rc;
1916}
1917
1918static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1919{
1920 if (host->msm_bus_vote.client_handle)
1921 msm_bus_scale_unregister_client(
1922 host->msm_bus_vote.client_handle);
1923}
1924
/*
 * Enable or disable the bus-bandwidth vote for the host.
 *
 * On enable, the vote matching the currently required bandwidth is
 * applied immediately. On disable, the vote is either removed at once
 * (when MMC clock gating handles the delay) or deferred via the
 * delayed-work path.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* No bus-scale client: voting is not in use on this target. */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
1953
Asutosh Das0ef24812012-12-18 16:14:02 +05301954/* Regulator utility functions */
1955static int sdhci_msm_vreg_init_reg(struct device *dev,
1956 struct sdhci_msm_reg_data *vreg)
1957{
1958 int ret = 0;
1959
1960 /* check if regulator is already initialized? */
1961 if (vreg->reg)
1962 goto out;
1963
1964 /* Get the regulator handle */
1965 vreg->reg = devm_regulator_get(dev, vreg->name);
1966 if (IS_ERR(vreg->reg)) {
1967 ret = PTR_ERR(vreg->reg);
1968 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1969 __func__, vreg->name, ret);
1970 goto out;
1971 }
1972
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301973 if (regulator_count_voltages(vreg->reg) > 0) {
1974 vreg->set_voltage_sup = true;
1975 /* sanity check */
1976 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1977 pr_err("%s: %s invalid constraints specified\n",
1978 __func__, vreg->name);
1979 ret = -EINVAL;
1980 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301981 }
1982
1983out:
1984 return ret;
1985}
1986
1987static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1988{
1989 if (vreg->reg)
1990 devm_regulator_put(vreg->reg);
1991}
1992
1993static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1994 *vreg, int uA_load)
1995{
1996 int ret = 0;
1997
1998 /*
1999 * regulators that do not support regulator_set_voltage also
2000 * do not support regulator_set_optimum_mode
2001 */
2002 if (vreg->set_voltage_sup) {
2003 ret = regulator_set_load(vreg->reg, uA_load);
2004 if (ret < 0)
2005 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2006 __func__, vreg->name, uA_load, ret);
2007 else
2008 /*
2009 * regulator_set_load() can return non zero
2010 * value even for success case.
2011 */
2012 ret = 0;
2013 }
2014 return ret;
2015}
2016
2017static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2018 int min_uV, int max_uV)
2019{
2020 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302021 if (vreg->set_voltage_sup) {
2022 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2023 if (ret) {
2024 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302025 __func__, vreg->name, min_uV, max_uV, ret);
2026 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302027 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302028
2029 return ret;
2030}
2031
2032static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2033{
2034 int ret = 0;
2035
2036 /* Put regulator in HPM (high power mode) */
2037 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2038 if (ret < 0)
2039 return ret;
2040
2041 if (!vreg->is_enabled) {
2042 /* Set voltage level */
2043 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2044 vreg->high_vol_level);
2045 if (ret)
2046 return ret;
2047 }
2048 ret = regulator_enable(vreg->reg);
2049 if (ret) {
2050 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2051 __func__, vreg->name, ret);
2052 return ret;
2053 }
2054 vreg->is_enabled = true;
2055 return ret;
2056}
2057
/*
 * Disable @vreg, honoring the always-on property:
 *  - regular regulators are disabled, their load vote dropped to 0 and
 *    the minimum voltage relaxed to 0;
 *  - always-on regulators stay enabled but may be placed in LPM (low
 *    power mode) when lpm_sup is set.
 * Returns 0 on success or the first failing step's error code.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		/* Drop the load vote now that the supply is off. */
		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							      vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2092
2093static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2094 bool enable, bool is_init)
2095{
2096 int ret = 0, i;
2097 struct sdhci_msm_slot_reg_data *curr_slot;
2098 struct sdhci_msm_reg_data *vreg_table[2];
2099
2100 curr_slot = pdata->vreg_data;
2101 if (!curr_slot) {
2102 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2103 __func__);
2104 goto out;
2105 }
2106
2107 vreg_table[0] = curr_slot->vdd_data;
2108 vreg_table[1] = curr_slot->vdd_io_data;
2109
2110 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2111 if (vreg_table[i]) {
2112 if (enable)
2113 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2114 else
2115 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2116 if (ret)
2117 goto out;
2118 }
2119 }
2120out:
2121 return ret;
2122}
2123
2124/*
2125 * Reset vreg by ensuring it is off during probe. A call
2126 * to enable vreg is needed to balance disable vreg
2127 */
2128static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2129{
2130 int ret;
2131
2132 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2133 if (ret)
2134 return ret;
2135 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2136 return ret;
2137}
2138
/*
 * This init function should be called only once for each SDHC slot.
 *
 * With is_init true: obtain regulator handles for vdd and vdd-io and
 * balance the enable/disable refcount via sdhci_msm_vreg_reset().
 * With is_init false: release both regulator handles (the goto targets
 * form the deinit path). Returns 0 on success or the first error.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			goto vdd_reg_deinit;
	}
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2187
2188
2189static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2190 enum vdd_io_level level,
2191 unsigned int voltage_level)
2192{
2193 int ret = 0;
2194 int set_level;
2195 struct sdhci_msm_reg_data *vdd_io_reg;
2196
2197 if (!pdata->vreg_data)
2198 return ret;
2199
2200 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2201 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2202 switch (level) {
2203 case VDD_IO_LOW:
2204 set_level = vdd_io_reg->low_vol_level;
2205 break;
2206 case VDD_IO_HIGH:
2207 set_level = vdd_io_reg->high_vol_level;
2208 break;
2209 case VDD_IO_SET_LEVEL:
2210 set_level = voltage_level;
2211 break;
2212 default:
2213 pr_err("%s: invalid argument level = %d",
2214 __func__, level);
2215 ret = -EINVAL;
2216 return ret;
2217 }
2218 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2219 set_level);
2220 }
2221 return ret;
2222}
2223
/*
 * Dump the power-control registers (STATUS/MASK/CTL) from the core_mem
 * region for debugging. Non-static: callable from elsewhere in the driver
 * (e.g. the power IRQ timeout path).
 */
void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
		mmc_hostname(host->mmc),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
}
2235
/*
 * Power-control IRQ handler.
 *
 * Reads and clears the PWRCTL status, services BUS ON/OFF and IO
 * HIGH/LOW requests (regulators, pins, IO voltage), ACKs the result back
 * to the controller, switches the IO pad power rail when needed, records
 * the new power/IO state under the host lock, and completes
 * pwr_irq_completion for waiters in sdhci_msm_check_power_status().
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 irq_status = 0;
	u8 irq_ack = 0;
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status &
		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			BUG_ON(1);
		}
		writeb_relaxed(irq_status,
				(msm_host->core_mem + CORE_PWRCTL_CLEAR));
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
			mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();

	/* Flip the IO pad power switch to match the selected IO level. */
	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	/* Wake any thread blocked in sdhci_msm_check_power_status(). */
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2371
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302372static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302373show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2374{
2375 struct sdhci_host *host = dev_get_drvdata(dev);
2376 int poll;
2377 unsigned long flags;
2378
2379 spin_lock_irqsave(&host->lock, flags);
2380 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2381 spin_unlock_irqrestore(&host->lock, flags);
2382
2383 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2384}
2385
2386static ssize_t
2387store_polling(struct device *dev, struct device_attribute *attr,
2388 const char *buf, size_t count)
2389{
2390 struct sdhci_host *host = dev_get_drvdata(dev);
2391 int value;
2392 unsigned long flags;
2393
2394 if (!kstrtou32(buf, 0, &value)) {
2395 spin_lock_irqsave(&host->lock, flags);
2396 if (value) {
2397 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2398 mmc_detect_change(host->mmc, 0);
2399 } else {
2400 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2401 }
2402 spin_unlock_irqrestore(&host->lock, flags);
2403 }
2404 return count;
2405}
2406
2407static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302408show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2409 char *buf)
2410{
2411 struct sdhci_host *host = dev_get_drvdata(dev);
2412 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2413 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2414
2415 return snprintf(buf, PAGE_SIZE, "%u\n",
2416 msm_host->msm_bus_vote.is_max_bw_needed);
2417}
2418
2419static ssize_t
2420store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2421 const char *buf, size_t count)
2422{
2423 struct sdhci_host *host = dev_get_drvdata(dev);
2424 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2425 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2426 uint32_t value;
2427 unsigned long flags;
2428
2429 if (!kstrtou32(buf, 0, &value)) {
2430 spin_lock_irqsave(&host->lock, flags);
2431 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2432 spin_unlock_irqrestore(&host->lock, flags);
2433 }
2434 return count;
2435}
2436
/*
 * Wait for the power IRQ to service a power/IO-level request (@req_type
 * is a REQ_BUS_* / REQ_IO_* bitmask), unless the request is already
 * satisfied or the IRQ can never fire for it.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		mmc_hostname(host->mmc), __func__, req_type,
		msm_host->curr_pwr_state, msm_host->curr_io_level);
	io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
			((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
				mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	/* Request already satisfied by a previously handled IRQ? */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to hanlde a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else
		wait_for_completion(&msm_host->pwr_irq_completion);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2493
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002494static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2495{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302496 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2497
2498 if (enable) {
2499 config |= CORE_CDR_EN;
2500 config &= ~CORE_CDR_EXT_EN;
2501 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2502 } else {
2503 config &= ~CORE_CDR_EN;
2504 config |= CORE_CDR_EXT_EN;
2505 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2506 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002507}
2508
/* Maximum number of DMA segments supported by this controller. */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2513
/*
 * Minimum supported clock rate: the first entry of the DT-supplied
 * supported-clock table (table is expected to be sorted ascending,
 * as assumed by sdhci_msm_get_max_clock()/get_sup_clk_rate()).
 */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return msm_host->pdata->sup_clk_table[0];
}
2521
2522static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2523{
2524 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2525 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2526 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2527
2528 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2529}
2530
2531static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2532 u32 req_clk)
2533{
2534 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2535 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2536 unsigned int sel_clk = -1;
2537 unsigned char cnt;
2538
2539 if (req_clk < sdhci_msm_get_min_clock(host)) {
2540 sel_clk = sdhci_msm_get_min_clock(host);
2541 return sel_clk;
2542 }
2543
2544 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2545 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2546 break;
2547 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2548 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2549 break;
2550 } else {
2551 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2552 }
2553 }
2554 return sel_clk;
2555}
2556
/*
 * Bring up the controller-side clocks (bus vote, then pclk, then core
 * clock) and mark controller_clock enabled. Idempotent: returns 0
 * immediately when the controller clock is already on. On failure, the
 * already-enabled resources are unwound via the goto chain.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before turning clocks on. */
	sdhci_msm_bus_voting(host, 1);

	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
			       mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
		       mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2598
2599
2600
/*
 * sdhci_msm_prepare_clocks - enable or disable the SDHC clock set as a group.
 * @host: SDHC host
 * @enable: true to turn the clocks on, false to turn them off
 *
 * Enable path: refreshes the bus vote if the controller clock is already up,
 * brings up the controller clock, then the optional bus_clk, ff_clk and
 * sleep_clk, unwinding in reverse order on any failure (goto chain below).
 * Disable path: gates SDCLK via SDHCI_CLOCK_CONTROL first, then tears the
 * clocks down in reverse of the enable order and drops the bus vote. The
 * disable path bails out early while host->mmc->card_clock_off is set,
 * because 1.8V signal switching still needs register access (see comment
 * in the body); in that case clks_on is deliberately left untouched.
 * The final on/off state is mirrored into msm_host->clks_on.
 *
 * Returns 0 on success or the failing clk_prepare_enable() error code.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* bus_clk, ff_clk and sleep_clk are optional per platform */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* ensure the clock enables are fully ordered before use */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		/* tear down in strict reverse order of the enable path */
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2695
/*
 * sdhci_msm_set_clock - program the card clock rate and the related
 * vendor-specific clock/mode selections.
 * @host: SDHC host
 * @clock: requested card clock in Hz; 0 gates the clocks entirely
 *
 * For clock == 0 the power-save gating is disabled and all clocks are
 * turned off via sdhci_msm_prepare_clocks(). Otherwise the clocks are
 * (re)enabled, CLK_PWRSAVE is toggled according to whether the card may
 * be clock-gated, the supported controller rate is looked up (doubled
 * for DDR50/DDR52/HS400 — the controller halves it internally), the
 * MCLK source and HC_SELECT_IN are programmed for HS400 vs. all other
 * modes, and finally clk_set_rate()/sdhci_set_clock() apply the rate.
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	/* enable pwrsave only above 400KHz and when gating is allowed */
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
					& ~CORE_HC_MCLK_SEL_MASK)
					| CORE_HC_MCLK_SEL_HS400),
					host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
					& ~CORE_HC_MCLK_SEL_MASK)
					| CORE_HC_MCLK_SEL_DFLT),
					host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* make sure the above register writes land before changing the rate */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2856
/*
 * sdhci_msm_set_uhs_signaling - map an MMC timing mode onto the UHS mode
 * select field of SDHCI_HOST_CONTROL2.
 * @host: SDHC host
 * @uhs: MMC_TIMING_* mode requested by the core layer
 *
 * HS200 and HS400 have no UHS encoding of their own, so both are mapped to
 * SDR104. Below 100MHz the mode select is forced back to a value < 3'b011
 * and the DLL is reset and powered down so that tuning can be skipped;
 * calibration_done is cleared so CDCLP533 is recalibrated when the clock
 * returns to a high rate.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_RST),
				host->ioaddr + CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_PDN),
				host->ioaddr + CORE_DLL_CONFIG);
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
2919
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002920#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002921#define DRV_NAME "cmdq-host"
2922static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_msm_host *msm_host)
2923{
2924 int i = 0;
2925 struct cmdq_host *cq_host = mmc_cmdq_private(msm_host->mmc);
2926 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2927 u16 minor = version & CORE_VERSION_TARGET_MASK;
2928 /* registers offset changed starting from 4.2.0 */
2929 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2930
2931 pr_err("---- Debug RAM dump ----\n");
2932 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2933 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2934 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
2935
2936 while (i < 16) {
2937 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
2938 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
2939 i++;
2940 }
2941 pr_err("-------------------------\n");
2942}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05302943
/*
 * sdhci_msm_dump_vendor_regs - dump vendor-specific debug state to the log.
 * @host: SDHC host
 *
 * Prints the MSM core status registers, DLL config/status, ADMA error
 * addresses and, when CMDQ is present (host->cq_host), the CMDQ debug RAM.
 * It then walks the hardware test bus: for each (tbsel2, tbsel) pair it
 * programs CORE_TESTBUS_CONFIG and samples CORE_SDCC_DEBUG_REG, printing
 * the collected values four per line.
 *
 * NOTE(review): the nested loops cover tbsel2 0..6 x tbsel 0..7 = 56
 * entries, fewer than MAX_TEST_BUS (60); the remaining array slots print
 * as 0. Presumably intentional for this controller version — confirm.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(msm_host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
2998
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05302999/*
3000 * sdhci_msm_enhanced_strobe_mask :-
3001 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3002 * SW should write 3 to
3003 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3004 * The default reset value of this register is 2.
3005 */
3006static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3007{
3008 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3009 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3010
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303011 if (!msm_host->enhanced_strobe ||
3012 !mmc_card_strobe(msm_host->mmc->card)) {
3013 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303014 mmc_hostname(host->mmc));
3015 return;
3016 }
3017
3018 if (set) {
3019 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3020 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3021 host->ioaddr + CORE_VENDOR_SPEC3);
3022 } else {
3023 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3024 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3025 host->ioaddr + CORE_VENDOR_SPEC3);
3026 }
3027}
3028
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003029static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3030{
3031 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3032 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3033
3034 if (set) {
3035 writel_relaxed(CORE_TESTBUS_ENA,
3036 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3037 } else {
3038 u32 value;
3039
3040 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3041 value &= ~CORE_TESTBUS_ENA;
3042 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3043 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303044}
3045
Dov Levenglick9c575e22015-07-20 09:30:52 +03003046static void sdhci_msm_detect(struct sdhci_host *host, bool detected)
3047{
3048 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3049 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3050 struct mmc_host *mmc = msm_host->mmc;
3051 struct mmc_card *card = mmc->card;
3052
3053 if (detected && mmc_card_sdio(card))
3054 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3055 else
3056 mmc->pm_caps &= ~MMC_PM_KEEP_POWER;
3057}
3058
/*
 * sdhci_msm_reset_workaround - arm/disarm the controller SW reset workaround.
 * @host: SDHC host
 * @enable: non-zero requests a SW reset and waits for completion;
 *	    zero clears the wait-idle-disable workaround bit.
 *
 * On enable, HC_SW_RST_REQ is set in CORE_VENDOR_SPEC_FUNC2 and the bit is
 * polled (up to 10000 iterations of udelay(10), ~100ms total) until the
 * hardware clears it. If it never clears, HC_SW_RST_WAIT_IDLE_DIS is set so
 * the reset does not wait for pending AXI transfers, and the time of the
 * workaround application is recorded in host->reset_wa_t for later
 * reference.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		timeout = 10000;
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
					HC_SW_RST_WAIT_IDLE_DIS,
					host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3100
Gilad Broner44445992015-09-29 16:05:39 +03003101static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3102{
3103 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
3104 container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
3105
3106 if (atomic_read(&pm_qos_irq->counter))
3107 return;
3108
3109 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3110 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3111}
3112
3113void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3114{
3115 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3116 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3117 struct sdhci_msm_pm_qos_latency *latency =
3118 &msm_host->pdata->pm_qos_data.irq_latency;
3119 int counter;
3120
3121 if (!msm_host->pm_qos_irq.enabled)
3122 return;
3123
3124 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3125 /* Make sure to update the voting in case power policy has changed */
3126 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3127 && counter > 1)
3128 return;
3129
3130 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3131 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3132 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3133 msm_host->pm_qos_irq.latency);
3134}
3135
3136void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3137{
3138 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3139 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3140 int counter;
3141
3142 if (!msm_host->pm_qos_irq.enabled)
3143 return;
3144
Subhash Jadavani4d813902015-10-15 12:16:43 -07003145 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3146 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3147 } else {
3148 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3149 return;
Gilad Broner44445992015-09-29 16:05:39 +03003150 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003151
Gilad Broner44445992015-09-29 16:05:39 +03003152 if (counter)
3153 return;
3154
3155 if (async) {
3156 schedule_work(&msm_host->pm_qos_irq.unvote_work);
3157 return;
3158 }
3159
3160 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3161 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3162 msm_host->pm_qos_irq.latency);
3163}
3164
Gilad Broner68c54562015-09-20 11:59:46 +03003165static ssize_t
3166sdhci_msm_pm_qos_irq_show(struct device *dev,
3167 struct device_attribute *attr, char *buf)
3168{
3169 struct sdhci_host *host = dev_get_drvdata(dev);
3170 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3171 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3172 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3173
3174 return snprintf(buf, PAGE_SIZE,
3175 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3176 irq->enabled, atomic_read(&irq->counter), irq->latency);
3177}
3178
3179static ssize_t
3180sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3181 struct device_attribute *attr, char *buf)
3182{
3183 struct sdhci_host *host = dev_get_drvdata(dev);
3184 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3185 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3186
3187 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3188}
3189
3190static ssize_t
3191sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3192 struct device_attribute *attr, const char *buf, size_t count)
3193{
3194 struct sdhci_host *host = dev_get_drvdata(dev);
3195 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3196 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3197 uint32_t value;
3198 bool enable;
3199 int ret;
3200
3201 ret = kstrtou32(buf, 0, &value);
3202 if (ret)
3203 goto out;
3204 enable = !!value;
3205
3206 if (enable == msm_host->pm_qos_irq.enabled)
3207 goto out;
3208
3209 msm_host->pm_qos_irq.enabled = enable;
3210 if (!enable) {
3211 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3212 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3213 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3214 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3215 msm_host->pm_qos_irq.latency);
3216 }
3217
3218out:
3219 return count;
3220}
3221
/*
 * sdhci_msm_pm_qos_irq_init - one-time setup of the IRQ PM QoS request.
 * @host: SDHC host
 *
 * No-op unless the platform data marks the IRQ QoS configuration valid, or
 * if the request was already initialized (this is called once per
 * partition). Affines the request either to the SDHC IRQ itself
 * (PM_QOS_REQ_AFFINE_IRQ) or to a fixed CPU from platform data, registers
 * the deferred unvote work, votes the performance-mode latency for the
 * initialization phase, and creates the pm_qos_irq_enable /
 * pm_qos_irq_status sysfs attributes (failures are logged, not fatal).
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
		msm_host->pdata->pm_qos_data.irq_req_type;
	/* Affine the request to the SDHC IRQ or to a fixed CPU */
	if (msm_host->pm_qos_irq.req.type == PM_QOS_REQ_AFFINE_IRQ)
		msm_host->pm_qos_irq.req.irq = host->irq;
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3280
3281static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3282 struct device_attribute *attr, char *buf)
3283{
3284 struct sdhci_host *host = dev_get_drvdata(dev);
3285 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3286 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3287 struct sdhci_msm_pm_qos_group *group;
3288 int i;
3289 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3290 int offset = 0;
3291
3292 for (i = 0; i < nr_groups; i++) {
3293 group = &msm_host->pm_qos[i];
3294 offset += snprintf(&buf[offset], PAGE_SIZE,
3295 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3296 i, group->req.cpus_affine.bits[0],
3297 msm_host->pm_qos_group_enable,
3298 atomic_read(&group->counter),
3299 group->latency);
3300 }
3301
3302 return offset;
3303}
3304
3305static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3306 struct device_attribute *attr, char *buf)
3307{
3308 struct sdhci_host *host = dev_get_drvdata(dev);
3309 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3310 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3311
3312 return snprintf(buf, PAGE_SIZE, "%s\n",
3313 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3314}
3315
3316static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3317 struct device_attribute *attr, const char *buf, size_t count)
3318{
3319 struct sdhci_host *host = dev_get_drvdata(dev);
3320 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3321 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3322 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3323 uint32_t value;
3324 bool enable;
3325 int ret;
3326 int i;
3327
3328 ret = kstrtou32(buf, 0, &value);
3329 if (ret)
3330 goto out;
3331 enable = !!value;
3332
3333 if (enable == msm_host->pm_qos_group_enable)
3334 goto out;
3335
3336 msm_host->pm_qos_group_enable = enable;
3337 if (!enable) {
3338 for (i = 0; i < nr_groups; i++) {
3339 cancel_work_sync(&msm_host->pm_qos[i].unvote_work);
3340 atomic_set(&msm_host->pm_qos[i].counter, 0);
3341 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3342 pm_qos_update_request(&msm_host->pm_qos[i].req,
3343 msm_host->pm_qos[i].latency);
3344 }
3345 }
3346
3347out:
3348 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003349}
3350
3351static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3352{
3353 int i;
3354 struct sdhci_msm_cpu_group_map *map =
3355 &msm_host->pdata->pm_qos_data.cpu_group_map;
3356
3357 if (cpu < 0)
3358 goto not_found;
3359
3360 for (i = 0; i < map->nr_groups; i++)
3361 if (cpumask_test_cpu(cpu, &map->mask[i]))
3362 return i;
3363
3364not_found:
3365 return -EINVAL;
3366}
3367
3368void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3369 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3370{
3371 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3372 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3373 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3374 struct sdhci_msm_pm_qos_group *pm_qos_group;
3375 int counter;
3376
3377 if (!msm_host->pm_qos_group_enable || group < 0)
3378 return;
3379
3380 pm_qos_group = &msm_host->pm_qos[group];
3381 counter = atomic_inc_return(&pm_qos_group->counter);
3382
3383 /* Make sure to update the voting in case power policy has changed */
3384 if (pm_qos_group->latency == latency->latency[host->power_policy]
3385 && counter > 1)
3386 return;
3387
3388 cancel_work_sync(&pm_qos_group->unvote_work);
3389
3390 pm_qos_group->latency = latency->latency[host->power_policy];
3391 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3392}
3393
3394static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3395{
3396 struct sdhci_msm_pm_qos_group *group =
3397 container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
3398
3399 if (atomic_read(&group->counter))
3400 return;
3401
3402 group->latency = PM_QOS_DEFAULT_VALUE;
3403 pm_qos_update_request(&group->req, group->latency);
3404}
3405
/*
 * sdhci_msm_pm_qos_cpu_unvote() - drop one PM QoS vote for @cpu's group.
 * @host:  SDHCI host
 * @cpu:   CPU whose group vote is released
 * @async: if true, defer the pm_qos update to a workqueue instead of doing
 *         it inline (callers in atomic/hot paths use this)
 *
 * Returns true when this call released the last reference and the request
 * was (or will be, for async) reset to PM_QOS_DEFAULT_VALUE; false when
 * voting is disabled, @cpu maps to no group, or other votes remain.
 *
 * Note the short-circuit order: atomic_dec_return() only runs when group
 * voting is enabled and the group index is valid.
 */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return false;

	if (async) {
		schedule_work(&msm_host->pm_qos[group].unvote_work);
		return true;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
			      msm_host->pm_qos[group].latency);
	return true;
}
3426
/*
 * sdhci_msm_pm_qos_cpu_init() - set up per-CPU-group PM QoS voting.
 * @host:    SDHCI host
 * @latency: array of per-group latency tables from platform data
 *
 * Allocates one sdhci_msm_pm_qos_group per configured CPU group, registers a
 * core-affine PM_QOS_CPU_DMA_LATENCY request per group (initially at the
 * performance-mode latency), then enables group voting and creates the two
 * sysfs status/enable attributes. Idempotent: returns immediately if group
 * voting is already enabled. On allocation failure it silently leaves group
 * voting disabled (callers treat PM QoS as best-effort).
 *
 * Ordering matters: pm_qos_add_request() for every group happens before
 * pm_qos_group_enable is set, so voters never see a half-initialized array.
 */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	struct sdhci_msm_pm_qos_group *group;
	int i;
	int ret;

	if (msm_host->pm_qos_group_enable)
		return;

	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
			GFP_KERNEL);
	if (!msm_host->pm_qos)
		return;

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_WORK(&group->unvote_work,
			sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		/* Each request constrains only the CPUs of this group */
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
		cpumask_copy(&group->req.cpus_affine,
			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
		/* For initialization phase, set the performance mode latency */
		group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
			group->latency);
		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
			__func__, i,
			group->req.cpus_affine.bits[0],
			group->latency,
			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
	}
	msm_host->pm_qos_prev_cpu = -1;
	msm_host->pm_qos_group_enable = true;

	/* sysfs */
	msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
	msm_host->pm_qos_group_status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
	msm_host->pm_qos_group_status_attr.attr.name =
			"pm_qos_cpu_groups_status";
	msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_status_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
			__func__, ret);
	msm_host->pm_qos_group_enable_attr.show =
		sdhci_msm_pm_qos_group_enable_show;
	msm_host->pm_qos_group_enable_attr.store =
		sdhci_msm_pm_qos_group_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
	msm_host->pm_qos_group_enable_attr.attr.name =
			"pm_qos_cpu_groups_enable";
	/*
	 * NOTE(review): a .store handler is installed but the mode is
	 * read-only (S_IRUGO, no S_IWUSR) - writes can never reach it.
	 * Confirm whether write permission was intended here.
	 */
	msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_enable_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
			__func__, ret);
}
3492
Gilad Broner07d92eb2015-09-29 16:57:21 +03003493static void sdhci_msm_pre_req(struct sdhci_host *host,
3494 struct mmc_request *mmc_req)
3495{
3496 int cpu;
3497 int group;
3498 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3499 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3500 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3501 msm_host->pm_qos_prev_cpu);
3502
3503 sdhci_msm_pm_qos_irq_vote(host);
3504
3505 cpu = get_cpu();
3506 put_cpu();
3507 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3508 if (group < 0)
3509 return;
3510
3511 if (group != prev_group && prev_group >= 0) {
3512 sdhci_msm_pm_qos_cpu_unvote(host,
3513 msm_host->pm_qos_prev_cpu, false);
3514 prev_group = -1; /* make sure to vote for new group */
3515 }
3516
3517 if (prev_group < 0) {
3518 sdhci_msm_pm_qos_cpu_vote(host,
3519 msm_host->pdata->pm_qos_data.latency, cpu);
3520 msm_host->pm_qos_prev_cpu = cpu;
3521 }
3522}
3523
3524static void sdhci_msm_post_req(struct sdhci_host *host,
3525 struct mmc_request *mmc_req)
3526{
3527 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3528 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3529
3530 sdhci_msm_pm_qos_irq_unvote(host, false);
3531
3532 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3533 msm_host->pm_qos_prev_cpu = -1;
3534}
3535
3536static void sdhci_msm_init(struct sdhci_host *host)
3537{
3538 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3539 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3540
3541 sdhci_msm_pm_qos_irq_init(host);
3542
3543 if (msm_host->pdata->pm_qos_data.legacy_valid)
3544 sdhci_msm_pm_qos_cpu_init(host,
3545 msm_host->pdata->pm_qos_data.latency);
3546}
3547
/*
 * SDHCI core callback table for the MSM controller. Most entries are
 * MSM-specific overrides; set_bus_width and reset fall through to the
 * generic sdhci helpers.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.detect = sdhci_msm_detect,
	.reset_workaround = sdhci_msm_reset_workaround,
	/* PM QoS hooks around request submission/completion */
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
};
3571
/*
 * sdhci_set_default_hw_caps() - fix up the advertised capabilities and apply
 * core-version-dependent quirks/workarounds.
 * @msm_host: MSM host wrapper (provides core_mem and platform data)
 * @host:     SDHCI host (provides the standard register space)
 *
 * Reads the core version from CORE_MCI_VERSION and the stock capability
 * register, patches in voltage/bus-width support bits that newer cores no
 * longer advertise, selects the appropriate DLL handling flags per minor
 * version, optionally masks 64-bit descriptor support, and finally writes
 * the resulting capability value to CORE_VENDOR_SPEC_CAPABILITIES0 (also
 * cached in msm_host->caps_0).
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 * (i.e. cores older than 0x34 keep the CDC LP 533 DLL path.)
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* 0x49 and later need the RCLK delay fix */
	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3659
#ifdef CONFIG_MMC_CQ_HCI
/*
 * sdhci_msm_cmdq_init() - attach the command-queue (CMDQ) engine, if any.
 *
 * On success advertises MMC_CAP2_CMD_QUEUE; on failure logs at debug level
 * and leaves host->cq_host NULL so the host falls back to legacy mode.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (!IS_ERR(host->cq_host)) {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
		return;
	}

	dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
		PTR_ERR(host->cq_host));
	host->cq_host = NULL;
}
#else
/* CMDQ support compiled out: nothing to initialize. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
}
#endif
3683
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003684static bool sdhci_msm_is_bootdevice(struct device *dev)
3685{
3686 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3687 strlen(saved_command_line))) {
3688 char search_string[50];
3689
3690 snprintf(search_string, ARRAY_SIZE(search_string),
3691 "androidboot.bootdevice=%s", dev_name(dev));
3692 if (strnstr(saved_command_line, search_string,
3693 strlen(saved_command_line)))
3694 return true;
3695 else
3696 return false;
3697 }
3698
3699 /*
3700 * "androidboot.bootdevice=" argument is not present then
3701 * return true as we don't know the boot device anyways.
3702 */
3703 return true;
3704}
3705
Asutosh Das0ef24812012-12-18 16:14:02 +05303706static int sdhci_msm_probe(struct platform_device *pdev)
3707{
3708 struct sdhci_host *host;
3709 struct sdhci_pltfm_host *pltfm_host;
3710 struct sdhci_msm_host *msm_host;
3711 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003712 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003713 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003714 u32 irq_status, irq_ctl;
Asutosh Das0ef24812012-12-18 16:14:02 +05303715
3716 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3717 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3718 GFP_KERNEL);
3719 if (!msm_host) {
3720 ret = -ENOMEM;
3721 goto out;
3722 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303723
3724 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3725 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3726 if (IS_ERR(host)) {
3727 ret = PTR_ERR(host);
3728 goto out;
3729 }
3730
3731 pltfm_host = sdhci_priv(host);
3732 pltfm_host->priv = msm_host;
3733 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303734 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303735
3736 /* Extract platform data */
3737 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003738 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3739 if (ret < 0) {
3740 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3741 ret);
3742 goto pltfm_free;
3743 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003744
3745 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003746 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3747 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003748 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003749 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003750
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003751 if (disable_slots & (1 << (ret - 1))) {
3752 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3753 ret);
3754 ret = -ENODEV;
3755 goto pltfm_free;
3756 }
3757
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003758 if (ret <= 2)
3759 sdhci_slot[ret-1] = msm_host;
3760
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003761 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3762 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303763 if (!msm_host->pdata) {
3764 dev_err(&pdev->dev, "DT parsing error\n");
3765 goto pltfm_free;
3766 }
3767 } else {
3768 dev_err(&pdev->dev, "No device tree node\n");
3769 goto pltfm_free;
3770 }
3771
3772 /* Setup Clocks */
3773
3774 /* Setup SDCC bus voter clock. */
3775 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3776 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3777 /* Vote for max. clk rate for max. performance */
3778 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3779 if (ret)
3780 goto pltfm_free;
3781 ret = clk_prepare_enable(msm_host->bus_clk);
3782 if (ret)
3783 goto pltfm_free;
3784 }
3785
3786 /* Setup main peripheral bus clock */
3787 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3788 if (!IS_ERR(msm_host->pclk)) {
3789 ret = clk_prepare_enable(msm_host->pclk);
3790 if (ret)
3791 goto bus_clk_disable;
3792 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303793 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303794
3795 /* Setup SDC MMC clock */
3796 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3797 if (IS_ERR(msm_host->clk)) {
3798 ret = PTR_ERR(msm_host->clk);
3799 goto pclk_disable;
3800 }
3801
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303802 /* Set to the minimum supported clock frequency */
3803 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3804 if (ret) {
3805 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303806 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303807 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303808 ret = clk_prepare_enable(msm_host->clk);
3809 if (ret)
3810 goto pclk_disable;
3811
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303812 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303813 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303814
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003815 /* Setup CDC calibration fixed feedback clock */
3816 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3817 if (!IS_ERR(msm_host->ff_clk)) {
3818 ret = clk_prepare_enable(msm_host->ff_clk);
3819 if (ret)
3820 goto clk_disable;
3821 }
3822
3823 /* Setup CDC calibration sleep clock */
3824 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3825 if (!IS_ERR(msm_host->sleep_clk)) {
3826 ret = clk_prepare_enable(msm_host->sleep_clk);
3827 if (ret)
3828 goto ff_clk_disable;
3829 }
3830
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003831 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3832
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303833 ret = sdhci_msm_bus_register(msm_host, pdev);
3834 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003835 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303836
3837 if (msm_host->msm_bus_vote.client_handle)
3838 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3839 sdhci_msm_bus_work);
3840 sdhci_msm_bus_voting(host, 1);
3841
Asutosh Das0ef24812012-12-18 16:14:02 +05303842 /* Setup regulators */
3843 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3844 if (ret) {
3845 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303846 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303847 }
3848
3849 /* Reset the core and Enable SDHC mode */
3850 core_memres = platform_get_resource_byname(pdev,
3851 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303852 if (!core_memres) {
3853 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3854 goto vreg_deinit;
3855 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303856 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3857 resource_size(core_memres));
3858
3859 if (!msm_host->core_mem) {
3860 dev_err(&pdev->dev, "Failed to remap registers\n");
3861 ret = -ENOMEM;
3862 goto vreg_deinit;
3863 }
3864
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303865 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003866 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303867 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003868 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3869 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303870
Asutosh Das0ef24812012-12-18 16:14:02 +05303871 /* Set HC_MODE_EN bit in HC_MODE register */
3872 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3873
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003874 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3875 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3876 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3877
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303878 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003879
3880 /*
3881 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3882 * be used as required later on.
3883 */
3884 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3885 CORE_IO_PAD_PWR_SWITCH_EN),
3886 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303887 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303888 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3889 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3890 * interrupt in GIC (by registering the interrupt handler), we need to
3891 * ensure that any pending power irq interrupt status is acknowledged
3892 * otherwise power irq interrupt handler would be fired prematurely.
3893 */
3894 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3895 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3896 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3897 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3898 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3899 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3900 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3901 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003902
Subhash Jadavani28137342013-05-14 17:46:43 +05303903 /*
3904 * Ensure that above writes are propogated before interrupt enablement
3905 * in GIC.
3906 */
3907 mb();
3908
3909 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303910 * Following are the deviations from SDHC spec v3.0 -
3911 * 1. Card detection is handled using separate GPIO.
3912 * 2. Bus power control is handled by interacting with PMIC.
3913 */
3914 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
3915 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303916 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03003917 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303918 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05303919 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05303920 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05303921 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05303922
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05303923 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
3924 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
3925
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003926 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003927 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
3928 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
3929 SDHCI_VENDOR_VER_SHIFT));
3930 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
3931 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
3932 /*
3933 * Add 40us delay in interrupt handler when
3934 * operating at initialization frequency(400KHz).
3935 */
3936 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
3937 /*
3938 * Set Software Reset for DAT line in Software
3939 * Reset Register (Bit 2).
3940 */
3941 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
3942 }
3943
Asutosh Das214b9662013-06-13 14:27:42 +05303944 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
3945
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003946 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003947 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
3948 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05303949 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003950 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05303951 goto vreg_deinit;
3952 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003953 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05303954 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003955 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303956 if (ret) {
3957 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003958 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05303959 goto vreg_deinit;
3960 }
3961
3962 /* Enable pwr irq interrupts */
3963 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
3964
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303965#ifdef CONFIG_MMC_CLKGATE
3966 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
3967 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
3968#endif
3969
Asutosh Das0ef24812012-12-18 16:14:02 +05303970 /* Set host capabilities */
3971 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
3972 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003973 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05303974 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05303975 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003976 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3977 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Subhash Jadavani6d472b22013-05-29 15:52:10 +05303978 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003979 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03003980 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05303981 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07003982 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03003983 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Asutosh Das0ef24812012-12-18 16:14:02 +05303984
3985 if (msm_host->pdata->nonremovable)
3986 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
3987
Guoping Yuf7c91332014-08-20 16:56:18 +08003988 if (msm_host->pdata->nonhotplug)
3989 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
3990
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303991 init_completion(&msm_host->pwr_irq_completion);
3992
Sahitya Tummala581df132013-03-12 14:57:46 +05303993 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05303994 /*
3995 * Set up the card detect GPIO in active configuration before
3996 * configuring it as an IRQ. Otherwise, it can be in some
3997 * weird/inconsistent state resulting in flood of interrupts.
3998 */
3999 sdhci_msm_setup_pins(msm_host->pdata, true);
4000
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304001 /*
4002 * This delay is needed for stabilizing the card detect GPIO
4003 * line after changing the pull configs.
4004 */
4005 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304006 ret = mmc_gpio_request_cd(msm_host->mmc,
4007 msm_host->pdata->status_gpio, 0);
4008 if (ret) {
4009 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4010 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304011 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304012 }
4013 }
4014
Krishna Konda7feab352013-09-17 23:55:40 -07004015 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4016 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4017 host->dma_mask = DMA_BIT_MASK(64);
4018 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304019 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004020 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304021 host->dma_mask = DMA_BIT_MASK(32);
4022 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304023 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304024 } else {
4025 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4026 }
4027
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004028 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304029 ret = sdhci_add_host(host);
4030 if (ret) {
4031 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304032 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304033 }
4034
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004035 pm_runtime_set_active(&pdev->dev);
4036 pm_runtime_enable(&pdev->dev);
4037 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4038 pm_runtime_use_autosuspend(&pdev->dev);
4039
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304040 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4041 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4042 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4043 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4044 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4045 ret = device_create_file(&pdev->dev,
4046 &msm_host->msm_bus_vote.max_bus_bw);
4047 if (ret)
4048 goto remove_host;
4049
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304050 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4051 msm_host->polling.show = show_polling;
4052 msm_host->polling.store = store_polling;
4053 sysfs_attr_init(&msm_host->polling.attr);
4054 msm_host->polling.attr.name = "polling";
4055 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4056 ret = device_create_file(&pdev->dev, &msm_host->polling);
4057 if (ret)
4058 goto remove_max_bus_bw_file;
4059 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304060
4061 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4062 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4063 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4064 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4065 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4066 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4067 if (ret) {
4068 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4069 mmc_hostname(host->mmc), __func__, ret);
4070 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4071 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304072 /* Successful initialization */
4073 goto out;
4074
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304075remove_max_bus_bw_file:
4076 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304077remove_host:
4078 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004079 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304080 sdhci_remove_host(host, dead);
4081vreg_deinit:
4082 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304083bus_unregister:
4084 if (msm_host->msm_bus_vote.client_handle)
4085 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4086 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004087sleep_clk_disable:
4088 if (!IS_ERR(msm_host->sleep_clk))
4089 clk_disable_unprepare(msm_host->sleep_clk);
4090ff_clk_disable:
4091 if (!IS_ERR(msm_host->ff_clk))
4092 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304093clk_disable:
4094 if (!IS_ERR(msm_host->clk))
4095 clk_disable_unprepare(msm_host->clk);
4096pclk_disable:
4097 if (!IS_ERR(msm_host->pclk))
4098 clk_disable_unprepare(msm_host->pclk);
4099bus_clk_disable:
4100 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4101 clk_disable_unprepare(msm_host->bus_clk);
4102pltfm_free:
4103 sdhci_pltfm_free(pdev);
4104out:
4105 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4106 return ret;
4107}
4108
4109static int sdhci_msm_remove(struct platform_device *pdev)
4110{
4111 struct sdhci_host *host = platform_get_drvdata(pdev);
4112 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4113 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4114 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4115 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4116 0xffffffff);
4117
4118 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304119 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4120 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304121 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004122 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304123 sdhci_remove_host(host, dead);
4124 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304125
Asutosh Das0ef24812012-12-18 16:14:02 +05304126 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304127
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304128 sdhci_msm_setup_pins(pdata, true);
4129 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304130
4131 if (msm_host->msm_bus_vote.client_handle) {
4132 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4133 sdhci_msm_bus_unregister(msm_host);
4134 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304135 return 0;
4136}
4137
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004138#ifdef CONFIG_PM
4139static int sdhci_msm_runtime_suspend(struct device *dev)
4140{
4141 struct sdhci_host *host = dev_get_drvdata(dev);
4142 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4143 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004144 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004145
4146 disable_irq(host->irq);
4147 disable_irq(msm_host->pwr_irq);
4148
4149 /*
4150 * Remove the vote immediately only if clocks are off in which
4151 * case we might have queued work to remove vote but it may not
4152 * be completed before runtime suspend or system suspend.
4153 */
4154 if (!atomic_read(&msm_host->clks_on)) {
4155 if (msm_host->msm_bus_vote.client_handle)
4156 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4157 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004158 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4159 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004160
4161 return 0;
4162}
4163
4164static int sdhci_msm_runtime_resume(struct device *dev)
4165{
4166 struct sdhci_host *host = dev_get_drvdata(dev);
4167 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4168 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004169 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004170
4171 enable_irq(msm_host->pwr_irq);
4172 enable_irq(host->irq);
4173
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004174 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4175 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004176 return 0;
4177}
4178
4179static int sdhci_msm_suspend(struct device *dev)
4180{
4181 struct sdhci_host *host = dev_get_drvdata(dev);
4182 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4183 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004184 int ret = 0;
4185 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004186
4187 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4188 (msm_host->mmc->slot.cd_irq >= 0))
4189 disable_irq(msm_host->mmc->slot.cd_irq);
4190
4191 if (pm_runtime_suspended(dev)) {
4192 pr_debug("%s: %s: already runtime suspended\n",
4193 mmc_hostname(host->mmc), __func__);
4194 goto out;
4195 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004196 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004197out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004198 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4199 ktime_to_us(ktime_sub(ktime_get(), start)));
4200 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004201}
4202
4203static int sdhci_msm_resume(struct device *dev)
4204{
4205 struct sdhci_host *host = dev_get_drvdata(dev);
4206 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4207 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4208 int ret = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004209 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004210
4211 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4212 (msm_host->mmc->slot.cd_irq >= 0))
4213 enable_irq(msm_host->mmc->slot.cd_irq);
4214
4215 if (pm_runtime_suspended(dev)) {
4216 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4217 mmc_hostname(host->mmc), __func__);
4218 goto out;
4219 }
4220
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004221 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004222out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004223 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4224 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004225 return ret;
4226}
4227
/*
 * PM callbacks.  System sleep is handled by sdhci_msm_suspend/resume,
 * which themselves fall through to the runtime hooks; runtime PM uses
 * sdhci_msm_runtime_suspend/resume directly (no ->runtime_idle).
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* CONFIG_PM disabled: register the driver with no PM ops at all. */
#define SDHCI_MSM_PMOPS NULL
#endif
/* Device-tree compatible strings matched by this driver. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4244
/* Platform driver glue: probe/remove entry points, DT match and PM ops. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		/* NULL when CONFIG_PM is not set; see SDHCI_MSM_PMOPS above. */
		.pm = SDHCI_MSM_PMOPS,
	},
};
4255
/* Standard module init/exit boilerplate for a platform driver. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");