/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS 10
#define CORE_POWER 0x0
#define CORE_SW_RST (1 << 7)

#define SDHCI_VER_100 0x2B

#define CORE_VERSION_STEP_MASK 0x0000FFFF
#define CORE_VERSION_MINOR_MASK 0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT 16
#define CORE_VERSION_MAJOR_MASK 0xF0000000
#define CORE_VERSION_MAJOR_SHIFT 28
#define CORE_VERSION_TARGET_MASK 0x000000FF
#define SDHCI_MSM_VER_420 0x49

#define SWITCHABLE_SIGNALLING_VOL (1 << 29)

#define CORE_VERSION_MAJOR_MASK 0xF0000000
#define CORE_VERSION_MAJOR_SHIFT 28

#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
#define FF_CLK_SW_RST_DIS (1 << 13)

#define CORE_PWRCTL_BUS_OFF 0x01
#define CORE_PWRCTL_BUS_ON (1 << 1)
#define CORE_PWRCTL_IO_LOW (1 << 2)
#define CORE_PWRCTL_IO_HIGH (1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS 0x01
#define CORE_PWRCTL_BUS_FAIL (1 << 1)
#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
#define CORE_PWRCTL_IO_FAIL (1 << 3)

#define INT_MASK 0xF
#define MAX_PHASES 16

#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
#define CORE_DLL_EN (1 << 16)
#define CORE_CDR_EN (1 << 17)
#define CORE_CK_OUT_EN (1 << 18)
#define CORE_CDR_EXT_EN (1 << 19)
#define CORE_DLL_PDN (1 << 29)
#define CORE_DLL_RST (1 << 30)

#define CORE_DLL_LOCK (1 << 7)
#define CORE_DDR_DLL_LOCK (1 << 11)

#define CORE_CLK_PWRSAVE (1 << 1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
#define CORE_HC_AUTO_CMD21_EN (1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
#define CORE_HC_SELECT_IN_EN (1 << 18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL 0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
#define HC_SW_RST_REQ (1 << 21)
#define CORE_ONE_MID_EN (1 << 25)

#define CORE_8_BIT_SUPPORT (1 << 18)
#define CORE_3_3V_SUPPORT (1 << 24)
#define CORE_3_0V_SUPPORT (1 << 25)
#define CORE_1_8V_SUPPORT (1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
#define CORE_HW_AUTOCAL_ENA (1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1 0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
#define CORE_TIMER_ENA (1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
#define CORE_CDC_OFFSET_CFG 0x14C
#define CORE_CSR_CDC_DELAY_CFG 0x150
#define CORE_CDC_SLAVE_DDA_CFG 0x160
#define CORE_CSR_CDC_STATUS0 0x164
#define CORE_CALIBRATION_DONE (1 << 0)

#define CORE_CDC_ERROR_CODE_MASK 0x7000000

#define CQ_CMD_DBG_RAM 0x110
#define CQ_CMD_DBG_RAM_WA 0x150
#define CQ_CMD_DBG_RAM_OL 0x154

#define CORE_CSR_CDC_GEN_CFG 0x178
#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
#define CORE_CDC_SWITCH_RC_EN (1 << 1)

#define CORE_CDC_T4_DLY_SEL (1 << 0)
#define CORE_CMDIN_RCLK_EN (1 << 1)
#define CORE_START_CDC_TRAFFIC (1 << 6)

#define CORE_PWRSAVE_DLL (1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)

#define CORE_DDR_CAL_EN (1 << 0)
#define CORE_FLL_CYCLE_CNT (1 << 18)
#define CORE_DLL_CLOCK_DISABLE (1 << 21)

#define DDR_CONFIG_POR_VAL 0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY 115
#define DDR_CONFIG_2_POR_VAL 0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */

#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
#define TCXO_FREQ 19200000

#define INVALID_TUNING_PHASE -1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES 16
#define MAX_DRV_TYPES_SUPPORTED_HS200 4
#define MSM_AUTOSUSPEND_DELAY_MS 100

struct sdhci_msm_offset {
        u32 CORE_MCI_DATA_CNT;
        u32 CORE_MCI_STATUS;
        u32 CORE_MCI_FIFO_CNT;
        u32 CORE_MCI_VERSION;
        u32 CORE_GENERICS;
        u32 CORE_TESTBUS_CONFIG;
        u32 CORE_TESTBUS_SEL2_BIT;
        u32 CORE_TESTBUS_ENA;
        u32 CORE_TESTBUS_SEL2;
        u32 CORE_PWRCTL_STATUS;
        u32 CORE_PWRCTL_MASK;
        u32 CORE_PWRCTL_CLEAR;
        u32 CORE_PWRCTL_CTL;
        u32 CORE_SDCC_DEBUG_REG;
        u32 CORE_DLL_CONFIG;
        u32 CORE_DLL_STATUS;
        u32 CORE_VENDOR_SPEC;
        u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
        u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
        u32 CORE_VENDOR_SPEC_FUNC2;
        u32 CORE_VENDOR_SPEC_CAPABILITIES0;
        u32 CORE_DDR_200_CFG;
        u32 CORE_VENDOR_SPEC3;
        u32 CORE_DLL_CONFIG_2;
        u32 CORE_DDR_CONFIG;
        u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
        .CORE_MCI_DATA_CNT = 0x35C,
        .CORE_MCI_STATUS = 0x324,
        .CORE_MCI_FIFO_CNT = 0x308,
        .CORE_MCI_VERSION = 0x318,
        .CORE_GENERICS = 0x320,
        .CORE_TESTBUS_CONFIG = 0x32C,
        .CORE_TESTBUS_SEL2_BIT = 3,
        .CORE_TESTBUS_ENA = (1 << 31),
        .CORE_TESTBUS_SEL2 = (1 << 3),
        .CORE_PWRCTL_STATUS = 0x240,
        .CORE_PWRCTL_MASK = 0x244,
        .CORE_PWRCTL_CLEAR = 0x248,
        .CORE_PWRCTL_CTL = 0x24C,
        .CORE_SDCC_DEBUG_REG = 0x358,
        .CORE_DLL_CONFIG = 0x200,
        .CORE_DLL_STATUS = 0x208,
        .CORE_VENDOR_SPEC = 0x20C,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
        .CORE_VENDOR_SPEC_FUNC2 = 0x210,
        .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
        .CORE_DDR_200_CFG = 0x224,
        .CORE_VENDOR_SPEC3 = 0x250,
        .CORE_DLL_CONFIG_2 = 0x254,
        .CORE_DDR_CONFIG = 0x258,
        .CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
        .CORE_MCI_DATA_CNT = 0x30,
        .CORE_MCI_STATUS = 0x34,
        .CORE_MCI_FIFO_CNT = 0x44,
        .CORE_MCI_VERSION = 0x050,
        .CORE_GENERICS = 0x70,
        .CORE_TESTBUS_CONFIG = 0x0CC,
        .CORE_TESTBUS_SEL2_BIT = 4,
        .CORE_TESTBUS_ENA = (1 << 3),
        .CORE_TESTBUS_SEL2 = (1 << 4),
        .CORE_PWRCTL_STATUS = 0xDC,
        .CORE_PWRCTL_MASK = 0xE0,
        .CORE_PWRCTL_CLEAR = 0xE4,
        .CORE_PWRCTL_CTL = 0xE8,
        .CORE_SDCC_DEBUG_REG = 0x124,
        .CORE_DLL_CONFIG = 0x100,
        .CORE_DLL_STATUS = 0x108,
        .CORE_VENDOR_SPEC = 0x10C,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
        .CORE_VENDOR_SPEC_FUNC2 = 0x110,
        .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
        .CORE_DDR_200_CFG = 0x184,
        .CORE_VENDOR_SPEC3 = 0x1B0,
        .CORE_DLL_CONFIG_2 = 0x1B4,
        .CORE_DDR_CONFIG = 0x1B8,
        .CORE_DDR_CONFIG_2 = 0x1BC,
};

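/*
 * Register access helpers: newer controller versions drop the legacy
 * MCI register region, so accesses go either to the SDHC register
 * space (host->ioaddr) or to the separate core_mem region, depending
 * on msm_host->mci_removed. The two offset tables above carry the
 * per-version register offsets.
 */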
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

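/*
 * Standard tuning block patterns returned by the card during tuning
 * (CMD19/CMD21): the 64-byte pattern is used for 4-bit transfers and
 * the 128-byte pattern for 8-bit HS200 transfers (see the bus-width
 * check in sdhci_msm_execute_tuning()).
 */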
static const u32 tuning_block_64[] = {
        0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
        0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
        0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
        0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
        0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
        0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
        0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
        0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
        0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
        0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
        0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
        0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
        /* set vdd_io_data->low_vol_level */
        VDD_IO_LOW,
        /* set vdd_io_data->high_vol_level */
        VDD_IO_HIGH,
        /*
         * set to whatever is passed in voltage_level (the third argument)
         * of sdhci_msm_set_vdd_io_vol().
         */
        VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
                u8 poll)
{
        int rc = 0;
        u32 wait_cnt = 50;
        u8 ck_out_en = 0;
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;

        /* poll for CK_OUT_EN bit. max. poll time = 50us */
        ck_out_en = !!(readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

        while (ck_out_en != poll) {
                if (--wait_cnt == 0) {
                        pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
                                mmc_hostname(mmc), __func__, poll);
                        rc = -ETIMEDOUT;
                        goto out;
                }
                udelay(1);

                ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
        }
out:
        return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
        int rc = 0;
        u32 config;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;

        config = readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);
        config |= CORE_CDR_EN;
        config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
        writel_relaxed(config, host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err;

        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err;
        goto out;
err:
        pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
        return rc;
}

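/*
 * sysfs show/store handlers for the "auto_cmd21" attribute: they toggle
 * msm_host->en_auto_cmd21 under the host lock, which in turn decides
 * whether sdhci_msm_config_auto_tuning_cmd() enables auto CMD21 in the
 * vendor-specific register.
 */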
static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
                *attr, const char *buf, size_t count)
{
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        u32 tmp;
        unsigned long flags;

        if (!kstrtou32(buf, 0, &tmp)) {
                spin_lock_irqsave(&host->lock, flags);
                msm_host->en_auto_cmd21 = !!tmp;
                spin_unlock_irqrestore(&host->lock, flags);
        }
        return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;

        return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
                bool enable,
                u32 type)
{
        int rc = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;
        u32 val = 0;

        if (!msm_host->en_auto_cmd21)
                return 0;

        if (type == MMC_SEND_TUNING_BLOCK_HS200)
                val = CORE_HC_AUTO_CMD21_EN;
        else
                return 0;

        if (enable) {
                rc = msm_enable_cdr_cm_sdc4_dll(host);
                writel_relaxed(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC) | val,
                        host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
        } else {
                writel_relaxed(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC) & ~val,
                        host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
        }
        return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
        int rc = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;
        u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
                0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
                0x8};
        unsigned long flags;
        u32 config;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        spin_lock_irqsave(&host->lock, flags);

        config = readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);
        config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
        config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
        writel_relaxed(config, host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err_out;

        /*
         * Write the selected DLL clock output phase (0 ... 15)
         * to CDR_SELEXT bit field of DLL_CONFIG register.
         */
        writel_relaxed(((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                & ~(0xF << 20))
                | (grey_coded_phase_table[phase] << 20)),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err_out;

        config = readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);
        config |= CORE_CDR_EN;
        config &= ~CORE_CDR_EXT_EN;
        writel_relaxed(config, host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);
        goto out;

err_out:
        pr_err("%s: %s: Failed to set DLL phase: %d\n",
                mmc_hostname(mmc), __func__, phase);
out:
        spin_unlock_irqrestore(&host->lock, flags);
        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
                u8 *phase_table, u8 total_phases)
{
        int ret;
        u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
        u8 phases_per_row[MAX_PHASES] = {0};
        int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
        int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
        bool phase_0_found = false, phase_15_found = false;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        if (!total_phases || (total_phases > MAX_PHASES)) {
                pr_err("%s: %s: invalid argument: total_phases=%d\n",
                        mmc_hostname(mmc), __func__, total_phases);
                return -EINVAL;
        }

        for (cnt = 0; cnt < total_phases; cnt++) {
                ranges[row_index][col_index] = phase_table[cnt];
                phases_per_row[row_index] += 1;
                col_index++;

                if ((cnt + 1) == total_phases) {
                        continue;
                /* check if next phase in phase_table is consecutive or not */
                } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
                        row_index++;
                        col_index = 0;
                }
        }

        if (row_index >= MAX_PHASES)
                return -EINVAL;

        /* Check if phase-0 is present in the first valid window */
        if (!ranges[0][0]) {
                phase_0_found = true;
                phase_0_raw_index = 0;
                /* Check if a cycle exists between 2 valid windows */
                for (cnt = 1; cnt <= row_index; cnt++) {
                        if (phases_per_row[cnt]) {
                                for (i = 0; i < phases_per_row[cnt]; i++) {
                                        if (ranges[cnt][i] == 15) {
                                                phase_15_found = true;
                                                phase_15_raw_index = cnt;
                                                break;
                                        }
                                }
                        }
                }
        }

        /* If 2 valid windows form a cycle then merge them as a single window */
        if (phase_0_found && phase_15_found) {
                /* number of phases in the row where phase 0 is present */
                u8 phases_0 = phases_per_row[phase_0_raw_index];
                /* number of phases in the row where phase 15 is present */
                u8 phases_15 = phases_per_row[phase_15_raw_index];

                if (phases_0 + phases_15 >= MAX_PHASES)
                        /*
                         * If there are more than 1 phase windows then total
                         * number of phases in both the windows should not be
                         * more than or equal to MAX_PHASES.
                         */
                        return -EINVAL;

                /* Merge 2 cyclic windows */
                i = phases_15;
                for (cnt = 0; cnt < phases_0; cnt++) {
                        ranges[phase_15_raw_index][i] =
                                ranges[phase_0_raw_index][cnt];
                        if (++i >= MAX_PHASES)
                                break;
                }

                phases_per_row[phase_0_raw_index] = 0;
                phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
        }

        for (cnt = 0; cnt <= row_index; cnt++) {
                if (phases_per_row[cnt] > curr_max) {
                        curr_max = phases_per_row[cnt];
                        selected_row_index = cnt;
                }
        }

        i = ((curr_max * 3) / 4);
        if (i)
                i--;

        ret = (int)ranges[selected_row_index][i];

        if (ret >= MAX_PHASES) {
                ret = -EINVAL;
                pr_err("%s: %s: invalid phase selected=%d\n",
                        mmc_hostname(mmc), __func__, ret);
        }

        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return ret;
}

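/*
 * msm_cm_dll_set_freq() programs the 3-bit MCLK_FREQ field (bits 26:24
 * of DLL_CONFIG) from the current host clock; e.g. any clock above
 * 187 MHz and up to 200 MHz maps to the value 7.
 */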
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
        u32 mclk_freq = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;

        /* Program the MCLK value to MCLK_FREQ bit field */
        if (host->clock <= 112000000)
                mclk_freq = 0;
        else if (host->clock <= 125000000)
                mclk_freq = 1;
        else if (host->clock <= 137000000)
                mclk_freq = 2;
        else if (host->clock <= 150000000)
                mclk_freq = 3;
        else if (host->clock <= 162000000)
                mclk_freq = 4;
        else if (host->clock <= 175000000)
                mclk_freq = 5;
        else if (host->clock <= 187000000)
                mclk_freq = 6;
        else if (host->clock <= 200000000)
                mclk_freq = 7;

        writel_relaxed(((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                & ~(7 << 24)) | (mclk_freq << 24)),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;
        struct mmc_host *mmc = host->mmc;
        int rc = 0;
        unsigned long flags;
        u32 wait_cnt;
        bool prev_pwrsave, curr_pwrsave;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        spin_lock_irqsave(&host->lock, flags);
        prev_pwrsave = !!(readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
        curr_pwrsave = prev_pwrsave;
        /*
         * Make sure that clock is always enabled when DLL
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock. So let's disable the PWRSAVE
         * here and re-enable it once tuning is completed.
         */
        if (prev_pwrsave) {
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC)
                        & ~CORE_CLK_PWRSAVE), host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);
                curr_pwrsave = false;
        }

        if (msm_host->use_updated_dll_reset) {
                /* Disable the DLL clock */
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG)
                        & ~CORE_CK_OUT_EN), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);

                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2);
        }

        /* Write 1 to DLL_RST bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
        msm_cm_dll_set_freq(host);

        if (msm_host->use_updated_dll_reset) {
                u32 mclk_freq = 0;

                if ((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & CORE_FLL_CYCLE_CNT))
                        mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
                else
                        mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

                writel_relaxed(((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & ~(0xFF << 10)) | (mclk_freq << 10)),
                        host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
                /* wait for 5us before enabling DLL clock */
                udelay(5);
        }

        /* Write 0 to DLL_RST bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        if (msm_host->use_updated_dll_reset) {
                msm_cm_dll_set_freq(host);
                /* Enable the DLL clock */
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2);
        }

        /* Set DLL_EN bit to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Set CK_OUT_EN bit to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                | CORE_CK_OUT_EN), host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        wait_cnt = 50;
        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
                /* max. wait for 50us for LOCK bit to be set */
                if (--wait_cnt == 0) {
                        pr_err("%s: %s: DLL failed to LOCK\n",
                                mmc_hostname(mmc), __func__);
                        rc = -ETIMEDOUT;
                        goto out;
                }
                /* wait for 1us before polling again */
                udelay(1);
        }

out:
        /* Restore the correct PWRSAVE state */
        if (prev_pwrsave ^ curr_pwrsave) {
                u32 reg = readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);

                if (prev_pwrsave)
                        reg |= CORE_CLK_PWRSAVE;
                else
                        reg &= ~CORE_CLK_PWRSAVE;

                writel_relaxed(reg, host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return rc;
}

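/*
 * HS400 calibration comes in two flavours below: the CDCLP533 CDC
 * calibration sequence and the CM_DLL_SDC4 calibration. Which one runs
 * is decided by msm_host->use_cdclp533 in
 * sdhci_msm_hs400_dll_calibration().
 */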
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
        u32 calib_done;
        int ret = 0;
        int cdc_err = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                & ~CORE_CDC_T4_DLY_SEL),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

        /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
                & ~CORE_CDC_SWITCH_BYPASS_OFF),
                host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
                | CORE_CDC_SWITCH_RC_EN),
                host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                & ~CORE_START_CDC_TRAFFIC),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

        /*
         * Perform CDC Register Initialization Sequence
         *
         * CORE_CSR_CDC_CTLR_CFG0       0x11800EC
         * CORE_CSR_CDC_CTLR_CFG1       0x3011111
         * CORE_CSR_CDC_CAL_TIMER_CFG0  0x1201000
         * CORE_CSR_CDC_CAL_TIMER_CFG1  0x4
         * CORE_CSR_CDC_REFCOUNT_CFG    0xCB732020
         * CORE_CSR_CDC_COARSE_CAL_CFG  0xB19
         * CORE_CSR_CDC_DELAY_CFG       0x3AC
         * CORE_CDC_OFFSET_CFG          0x0
         * CORE_CDC_SLAVE_DDA_CFG       0x16334
         */

        writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
        writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
        writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
        writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
        writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
        writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
        writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

        /* CDC HW Calibration */

        /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                | CORE_SW_TRIG_FULL_CALIB),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                & ~CORE_SW_TRIG_FULL_CALIB),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                | CORE_HW_AUTOCAL_ENA),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr +
                CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
                host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

        mb();

        /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
        ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
                calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CDC Calibration was not completed\n",
                        mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
        cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
                & CORE_CDC_ERROR_CODE_MASK;
        if (cdc_err) {
                pr_err("%s: %s: CDC Error Code %d\n",
                        mmc_hostname(host->mmc), __func__, cdc_err);
                ret = -EINVAL;
                goto out;
        }

        /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                | CORE_START_CDC_TRAFFIC),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;
        u32 dll_status, ddr_config;
        int ret = 0;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /*
         * Reprogramming the value in case it might have been modified by
         * bootloaders.
         */
        if (msm_host->rclk_delay_fix) {
                writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
                        msm_host_offset->CORE_DDR_CONFIG_2);
        } else {
                ddr_config = DDR_CONFIG_POR_VAL &
                        ~DDR_CONFIG_PRG_RCLK_DLY_MASK;
                ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
                writel_relaxed(ddr_config, host->ioaddr +
                        msm_host_offset->CORE_DDR_CONFIG);
        }

        if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DDR_200_CFG)
                        | CORE_CMDIN_RCLK_EN), host->ioaddr +
                        msm_host_offset->CORE_DDR_200_CFG);

        /* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG_2)
                | CORE_DDR_CAL_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

        /* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
        ret = readl_poll_timeout(host->ioaddr +
                msm_host_offset->CORE_DLL_STATUS,
                dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
                        mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /*
         * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
         * when MCLK is gated OFF, it is not gated for less than 0.5us
         * and MCLK must be switched on for at-least 1us before DATA
         * starts coming. Controllers with 14lpp tech DLL cannot
         * guarantee above requirement. So PWRSAVE_DLL should not be
         * turned on for host controllers using this DLL.
         */
        if (!msm_host->use_14lpp_dll)
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC3)
                        | CORE_PWRSAVE_DLL), host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC3);
        mb();
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
        int ret = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
                pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
                        mmc_hostname(mmc));
                return -EINVAL;
        }

        if (msm_host->calibration_done ||
                !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
                return 0;
        }

        /*
         * Reset the tuning block.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        if (!ret)
                msm_host->calibration_done = true;
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
        int ret = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                msm_host->offset;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        /* Set the selected phase in delay line hw block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
        if (ret)
                goto out;

        /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        if (msm_host->use_cdclp533)
                /* Calibrate CDCLP533 DLL HW */
                ret = sdhci_msm_cdclp533_calibration(host);
        else
                /* Calibrate CM_DLL_SDC4 HW */
                ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

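/*
 * Helper used during tuning to change the card's driver strength: it
 * sends an MMC_SWITCH command rewriting EXT_CSD_HS_TIMING with the
 * requested drive type in the upper nibble and the timing value 2
 * (HS200) in the lower nibble.
 */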
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
                u8 drv_type)
{
        struct mmc_command cmd = {0};
        struct mmc_request mrq = {NULL};
        struct mmc_host *mmc = host->mmc;
        u8 val = ((drv_type << 4) | 2);

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                (EXT_CSD_HS_TIMING << 16) |
                (val << 8) |
                EXT_CSD_CMD_SET_NORMAL;
        cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
        /* 1 sec */
        cmd.busy_timeout = 1000 * 1000;

        memset(cmd.resp, 0, sizeof(cmd.resp));
        cmd.retries = 3;

        mrq.cmd = &cmd;
        cmd.data = NULL;

        mmc_wait_for_req(mmc, &mrq);
        pr_debug("%s: %s: set card drive type to %d\n",
                mmc_hostname(mmc), __func__,
                drv_type);
}

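/*
 * Tuning entry point: for SDR104/HS200/HS400 above 100 MHz it steps
 * through all 16 DLL output phases, issuing the tuning command at each
 * one and recording the phases whose returned data matches the expected
 * pattern, then programs the phase roughly 3/4 of the way into the
 * longest passing window (msm_find_most_appropriate_phase()).
 */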
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
        unsigned long flags;
        int tuning_seq_cnt = 3;
        u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
        const u32 *tuning_block_pattern = tuning_block_64;
        int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
        int rc;
        struct mmc_host *mmc = host->mmc;
        struct mmc_ios ios = host->mmc->ios;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        u8 drv_type = 0;
        bool drv_type_changed = false;
        struct mmc_card *card = host->mmc->card;
        int sts_retry;

        /*
         * Tuning is required for SDR104, HS200 and HS400 cards and
         * if clock frequency is greater than 100MHz in these modes.
         */
        if (host->clock <= CORE_FREQ_100MHZ ||
                !((ios.timing == MMC_TIMING_MMC_HS400) ||
                (ios.timing == MMC_TIMING_MMC_HS200) ||
                (ios.timing == MMC_TIMING_UHS_SDR104)))
                return 0;

        /*
         * Don't allow re-tuning for CRC errors observed for any commands
         * that are sent during the tuning sequence itself.
         */
        if (msm_host->tuning_in_progress)
                return 0;
        msm_host->tuning_in_progress = true;
        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

        /* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
        if (msm_host->tuning_done && !msm_host->calibration_done &&
                (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
                rc = sdhci_msm_hs400_dll_calibration(host);
                spin_lock_irqsave(&host->lock, flags);
                if (!rc)
                        msm_host->calibration_done = true;
                spin_unlock_irqrestore(&host->lock, flags);
                goto out;
        }

        spin_lock_irqsave(&host->lock, flags);

        if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
                (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
                tuning_block_pattern = tuning_block_128;
                size = sizeof(tuning_block_128);
        }
        spin_unlock_irqrestore(&host->lock, flags);

        data_buf = kmalloc(size, GFP_KERNEL);
        if (!data_buf) {
                rc = -ENOMEM;
                goto out;
        }

retry:
        tuned_phase_cnt = 0;

        /* first of all reset the tuning block */
        rc = msm_init_cm_dll(host);
        if (rc)
                goto kfree;

        phase = 0;
        do {
                struct mmc_command cmd = {0};
                struct mmc_data data = {0};
                struct mmc_request mrq = {
                        .cmd = &cmd,
                        .data = &data
                };
                struct scatterlist sg;
                struct mmc_command sts_cmd = {0};

                /* set the phase in delay line hw block */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        goto kfree;

                cmd.opcode = opcode;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

                data.blksz = size;
                data.blocks = 1;
                data.flags = MMC_DATA_READ;
                data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

                data.sg = &sg;
                data.sg_len = 1;
                sg_init_one(&sg, data_buf, size);
                memset(data_buf, 0, size);
                mmc_wait_for_req(mmc, &mrq);

                if (card && (cmd.error || data.error)) {
                        sts_cmd.opcode = MMC_SEND_STATUS;
                        sts_cmd.arg = card->rca << 16;
                        sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                        sts_retry = 5;
                        while (sts_retry) {
                                mmc_wait_for_cmd(mmc, &sts_cmd, 0);

                                if (sts_cmd.error ||
                                        (R1_CURRENT_STATE(sts_cmd.resp[0])
                                        != R1_STATE_TRAN)) {
                                        sts_retry--;
                                        /*
                                         * wait for at least 146 MCLK cycles for
                                         * the card to move to TRANS state. As
                                         * the MCLK would be min 200MHz for
                                         * tuning, we need max 0.73us delay. To
                                         * be on safer side 1ms delay is given.
                                         */
                                        usleep_range(1000, 1200);
                                        pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
                                                mmc_hostname(mmc), phase,
                                                sts_cmd.error, sts_cmd.resp[0]);
                                        continue;
                                }
                                break;
                        }
                }

                if (!cmd.error && !data.error &&
                        !memcmp(data_buf, tuning_block_pattern, size)) {
                        /* tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
                        pr_debug("%s: %s: found *** good *** phase = %d\n",
                                mmc_hostname(mmc), __func__, phase);
                } else {
                        pr_debug("%s: %s: found ## bad ## phase = %d\n",
                                mmc_hostname(mmc), __func__, phase);
                }
        } while (++phase < 16);

        if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
                card && mmc_card_mmc(card)) {
                /*
                 * If all phases pass then it's a problem. So change the card's
                 * drive type to a different value, if supported, and repeat
                 * tuning until at least one phase fails. Then set the original
                 * drive type back.
                 *
                 * If all the phases still pass after trying all possible
                 * drive types, then one of those 16 phases will be picked.
                 * This is no different from what was going on before the
                 * modification to change drive type and retune.
                 */
                pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
                        tuned_phase_cnt);

                /* set drive type to another value; default setting is 0x0 */
                while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
                        pr_debug("%s: trying different drive strength (%d)\n",
                                mmc_hostname(mmc), drv_type);
                        if (card->ext_csd.raw_driver_strength &
                                (1 << drv_type)) {
                                sdhci_msm_set_mmc_drv_type(host, opcode,
                                        drv_type);
                                if (!drv_type_changed)
                                        drv_type_changed = true;
                                goto retry;
                        }
                }
        }

        /* reset drive type to default (50 ohm) if changed */
        if (drv_type_changed)
                sdhci_msm_set_mmc_drv_type(host, opcode, 0);

        if (tuned_phase_cnt) {
                rc = msm_find_most_appropriate_phase(host, tuned_phases,
                        tuned_phase_cnt);
                if (rc < 0)
                        goto kfree;
                else
                        phase = (u8)rc;

                /*
                 * Finally set the selected phase in delay
                 * line hw block.
                 */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        goto kfree;
                msm_host->saved_tuning_phase = phase;
                pr_debug("%s: %s: finally setting the tuning phase to %d\n",
                        mmc_hostname(mmc), __func__, phase);
        } else {
                if (--tuning_seq_cnt)
                        goto retry;
                /* tuning failed */
                pr_err("%s: %s: no tuning point found\n",
                        mmc_hostname(mmc), __func__);
                rc = -EIO;
        }

kfree:
        kfree(data_buf);
out:
        spin_lock_irqsave(&host->lock, flags);
        if (!rc)
                msm_host->tuning_done = true;
        spin_unlock_irqrestore(&host->lock, flags);
        msm_host->tuning_in_progress = false;
        pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
        return rc;
}

static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
        struct sdhci_msm_gpio_data *curr;
        int i, ret = 0;

        curr = pdata->pin_data->gpio_data;
        for (i = 0; i < curr->size; i++) {
                if (!gpio_is_valid(curr->gpio[i].no)) {
                        ret = -EINVAL;
                        pr_err("%s: Invalid gpio = %d\n", __func__,
                                curr->gpio[i].no);
                        goto free_gpios;
                }
                if (enable) {
                        ret = gpio_request(curr->gpio[i].no,
                                curr->gpio[i].name);
                        if (ret) {
                                pr_err("%s: gpio_request(%d, %s) failed %d\n",
                                        __func__, curr->gpio[i].no,
                                        curr->gpio[i].name, ret);
                                goto free_gpios;
                        }
                        curr->gpio[i].is_enabled = true;
                } else {
                        gpio_free(curr->gpio[i].no);
                        curr->gpio[i].is_enabled = false;
                }
        }
        return ret;

free_gpios:
        for (i--; i >= 0; i--) {
                gpio_free(curr->gpio[i].no);
                curr->gpio[i].is_enabled = false;
        }
        return ret;
}

static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
                bool enable)
{
        int ret = 0;

        if (enable)
                ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
                        pdata->pctrl_data->pins_active);
        else
                ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
                        pdata->pctrl_data->pins_sleep);

        if (ret < 0)
                pr_err("%s state for pinctrl failed with %d\n",
                        enable ? "Enabling" : "Disabling", ret);

        return ret;
}

Asutosh Das0ef24812012-12-18 16:14:02 +05301401static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1402{
1403 int ret = 0;
1404
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301405 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301406 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301407 } else if (pdata->pctrl_data) {
1408 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1409 goto out;
1410 } else if (!pdata->pin_data) {
1411 return 0;
1412 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301413
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301414 if (pdata->pin_data->is_gpio)
1415 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301416out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301417 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301418 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301419
1420 return ret;
1421}
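
/*
 * Pin configuration note: sdhci_msm_setup_pins() above prefers the
 * pinctrl states ("active"/"sleep") parsed further down in this file and
 * falls back to raw GPIO requests only when no pinctrl data is present.
 * A minimal DT fragment for the pinctrl path could look like this
 * (illustrative labels only, not taken from any particular board file):
 *
 *	pinctrl-names = "active", "sleep";
 *	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
 *	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
 */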
1422
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301423static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1424 u32 **out, int *len, u32 size)
1425{
1426 int ret = 0;
1427 struct device_node *np = dev->of_node;
1428 size_t sz;
1429 u32 *arr = NULL;
1430
1431 if (!of_get_property(np, prop_name, len)) {
1432 ret = -EINVAL;
1433 goto out;
1434 }
1435 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001436 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301437 dev_err(dev, "%s invalid size\n", prop_name);
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1443 if (!arr) {
1444 dev_err(dev, "%s failed allocating memory\n", prop_name);
1445 ret = -ENOMEM;
1446 goto out;
1447 }
1448
1449 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1450 if (ret < 0) {
1451 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1452 goto out;
1453 }
1454 *out = arr;
1455out:
1456 if (ret)
1457 *len = 0;
1458 return ret;
1459}
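
/*
 * Illustrative only: sdhci_msm_dt_get_array() reads a flat u32 array
 * property. Two of its users later in this file are "qcom,clk-rates"
 * and "qcom,bus-bw-vectors-bps"; with made-up example values a DT
 * fragment would look like:
 *
 *	qcom,clk-rates = <400000 20000000 25000000 50000000
 *			100000000 200000000>;
 *	qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000
 *			50000000 100000000 200000000 4294967295>;
 */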
1460
Asutosh Das0ef24812012-12-18 16:14:02 +05301461#define MAX_PROP_SIZE 32
1462static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1463 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1464{
1465 int len, ret = 0;
1466 const __be32 *prop;
1467 char prop_name[MAX_PROP_SIZE];
1468 struct sdhci_msm_reg_data *vreg;
1469 struct device_node *np = dev->of_node;
1470
1471 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1472 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301473 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301474 return ret;
1475 }
1476
1477 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1478 if (!vreg) {
1479 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1480 ret = -ENOMEM;
1481 return ret;
1482 }
1483
1484 vreg->name = vreg_name;
1485
1486 snprintf(prop_name, MAX_PROP_SIZE,
1487 "qcom,%s-always-on", vreg_name);
1488 if (of_get_property(np, prop_name, NULL))
1489 vreg->is_always_on = true;
1490
1491 snprintf(prop_name, MAX_PROP_SIZE,
1492 "qcom,%s-lpm-sup", vreg_name);
1493 if (of_get_property(np, prop_name, NULL))
1494 vreg->lpm_sup = true;
1495
1496 snprintf(prop_name, MAX_PROP_SIZE,
1497 "qcom,%s-voltage-level", vreg_name);
1498 prop = of_get_property(np, prop_name, &len);
1499 if (!prop || (len != (2 * sizeof(__be32)))) {
1500 dev_warn(dev, "%s %s property\n",
1501 prop ? "invalid format" : "no", prop_name);
1502 } else {
1503 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1504 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1505 }
1506
1507 snprintf(prop_name, MAX_PROP_SIZE,
1508 "qcom,%s-current-level", vreg_name);
1509 prop = of_get_property(np, prop_name, &len);
1510 if (!prop || (len != (2 * sizeof(__be32)))) {
1511 dev_warn(dev, "%s %s property\n",
1512 prop ? "invalid format" : "no", prop_name);
1513 } else {
1514 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1515 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1516 }
1517
1518 *vreg_data = vreg;
1519 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1520 vreg->name, vreg->is_always_on ? "always_on," : "",
1521 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1522 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1523
1524 return ret;
1525}
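
/*
 * Illustrative only: for a regulator named "vdd-io" the properties
 * parsed by sdhci_msm_dt_parse_vreg_info() above map to a DT fragment
 * such as (example phandle and values; voltages in uV, currents in uA):
 *
 *	vdd-io-supply = <&sdhc_vdd_io>;
 *	qcom,vdd-io-always-on;
 *	qcom,vdd-io-lpm-sup;
 *	qcom,vdd-io-voltage-level = <1800000 2950000>;
 *	qcom,vdd-io-current-level = <200 22000>;
 */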
1526
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301527static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1528 struct sdhci_msm_pltfm_data *pdata)
1529{
1530 struct sdhci_pinctrl_data *pctrl_data;
1531 struct pinctrl *pctrl;
1532 int ret = 0;
1533
1534 /* Try to obtain pinctrl handle */
1535 pctrl = devm_pinctrl_get(dev);
1536 if (IS_ERR(pctrl)) {
1537 ret = PTR_ERR(pctrl);
1538 goto out;
1539 }
1540 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1541 if (!pctrl_data) {
1542 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1543 ret = -ENOMEM;
1544 goto out;
1545 }
1546 pctrl_data->pctrl = pctrl;
1547 /* Look-up and keep the states handy to be used later */
1548 pctrl_data->pins_active = pinctrl_lookup_state(
1549 pctrl_data->pctrl, "active");
1550 if (IS_ERR(pctrl_data->pins_active)) {
1551 ret = PTR_ERR(pctrl_data->pins_active);
1552 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1553 goto out;
1554 }
1555 pctrl_data->pins_sleep = pinctrl_lookup_state(
1556 pctrl_data->pctrl, "sleep");
1557 if (IS_ERR(pctrl_data->pins_sleep)) {
1558 ret = PTR_ERR(pctrl_data->pins_sleep);
1559 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1560 goto out;
1561 }
1562 pdata->pctrl_data = pctrl_data;
1563out:
1564 return ret;
1565}
1566
Asutosh Das0ef24812012-12-18 16:14:02 +05301567#define GPIO_NAME_MAX_LEN 32
1568static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1569 struct sdhci_msm_pltfm_data *pdata)
1570{
1571 int ret = 0, cnt, i;
1572 struct sdhci_msm_pin_data *pin_data;
1573 struct device_node *np = dev->of_node;
1574
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301575 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1576 if (!ret) {
1577 goto out;
1578 } else if (ret == -EPROBE_DEFER) {
1579 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1580 goto out;
1581 } else {
1582 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1583 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301584 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301585 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301586 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1587 if (!pin_data) {
1588 dev_err(dev, "No memory for pin_data\n");
1589 ret = -ENOMEM;
1590 goto out;
1591 }
1592
1593 cnt = of_gpio_count(np);
1594 if (cnt > 0) {
1595 pin_data->gpio_data = devm_kzalloc(dev,
1596 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1597 if (!pin_data->gpio_data) {
1598 dev_err(dev, "No memory for gpio_data\n");
1599 ret = -ENOMEM;
1600 goto out;
1601 }
1602 pin_data->gpio_data->size = cnt;
1603 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1604 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1605
1606 if (!pin_data->gpio_data->gpio) {
1607 dev_err(dev, "No memory for gpio\n");
1608 ret = -ENOMEM;
1609 goto out;
1610 }
1611
1612 for (i = 0; i < cnt; i++) {
1613 const char *name = NULL;
1614 char result[GPIO_NAME_MAX_LEN];
1615 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1616 of_property_read_string_index(np,
1617 "qcom,gpio-names", i, &name);
1618
1619 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1620 dev_name(dev), name ? name : "?");
1621 pin_data->gpio_data->gpio[i].name = result;
1622 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1623 pin_data->gpio_data->gpio[i].name,
1624 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301625 }
1626 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301627 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301628out:
1629 if (ret)
1630 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1631 return ret;
1632}
1633
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001634#ifdef CONFIG_SMP
1635static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1636{
1637 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1638}
1639#else
1640static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1641#endif
1642
Gilad Bronerc788a672015-09-08 15:39:11 +03001643static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1644 struct sdhci_msm_pltfm_data *pdata)
1645{
1646 struct device_node *np = dev->of_node;
1647 const char *str;
1648 u32 cpu;
1649 int ret = 0;
1650 int i;
1651
1652 pdata->pm_qos_data.irq_valid = false;
1653 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1654 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1655 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001656 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001657 }
1658
1659 /* must specify cpu for "affine_cores" type */
1660 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1661 pdata->pm_qos_data.irq_cpu = -1;
1662 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1663 if (ret) {
1664 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1665 ret);
1666 goto out;
1667 }
1668 if (cpu < 0 || cpu >= num_possible_cpus()) {
1669 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1670 __func__, cpu, num_possible_cpus());
1671 ret = -EINVAL;
1672 goto out;
1673 }
1674 pdata->pm_qos_data.irq_cpu = cpu;
1675 }
1676
1677 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1678 SDHCI_POWER_POLICY_NUM) {
1679 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1680 __func__, SDHCI_POWER_POLICY_NUM);
1681 ret = -EINVAL;
1682 goto out;
1683 }
1684
1685 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1686 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1687 &pdata->pm_qos_data.irq_latency.latency[i]);
1688
1689 pdata->pm_qos_data.irq_valid = true;
1690out:
1691 return ret;
1692}
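
/*
 * Illustrative only: the IRQ PM QoS properties parsed above, assuming
 * SDHCI_POWER_POLICY_NUM == 2 and purely example latency values:
 *
 *	qcom,pm-qos-irq-type = "affine_cores";
 *	qcom,pm-qos-irq-cpu = <0>;
 *	qcom,pm-qos-irq-latency = <70 70>;
 *
 * With "affine_irq" (and CONFIG_SMP) the cpu property is not needed and
 * the QoS request follows whichever CPU services the SDHC interrupt.
 */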
1693
1694static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1695 struct sdhci_msm_pltfm_data *pdata)
1696{
1697 struct device_node *np = dev->of_node;
1698 u32 mask;
1699 int nr_groups;
1700 int ret;
1701 int i;
1702
1703 /* Read cpu group mapping */
1704 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1705 if (nr_groups <= 0) {
1706 ret = -EINVAL;
1707 goto out;
1708 }
1709 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1710 pdata->pm_qos_data.cpu_group_map.mask =
1711 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1712 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1713 ret = -ENOMEM;
1714 goto out;
1715 }
1716
1717 for (i = 0; i < nr_groups; i++) {
1718 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1719 i, &mask);
1720
1721 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1722 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1723 cpu_possible_mask)) {
1724 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1725 __func__, mask, i);
1726 ret = -EINVAL;
1727 goto free_res;
1728 }
1729 }
1730 return 0;
1731
1732free_res:
1733 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1734out:
1735 return ret;
1736}
1737
1738static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1739 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1740{
1741 struct device_node *np = dev->of_node;
1742 struct sdhci_msm_pm_qos_latency *values;
1743 int ret;
1744 int i;
1745 int group;
1746 int cfg;
1747
1748 ret = of_property_count_u32_elems(np, name);
1749 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1750 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1751 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1752 ret);
1753 return -EINVAL;
1754 } else if (ret < 0) {
1755 return ret;
1756 }
1757
1758 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1759 GFP_KERNEL);
1760 if (!values)
1761 return -ENOMEM;
1762
1763 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1764 group = i / SDHCI_POWER_POLICY_NUM;
1765 cfg = i % SDHCI_POWER_POLICY_NUM;
1766 of_property_read_u32_index(np, name, i,
1767 &(values[group].latency[cfg]));
1768 }
1769
1770 *latency = values;
1771 return 0;
1772}
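
/*
 * Illustrative only: cpu-group based PM QoS with two groups, again
 * assuming SDHCI_POWER_POLICY_NUM == 2 (example masks and latencies):
 *
 *	qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *	qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
 *	qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
 *
 * Each latency list is read as nr_groups * SDHCI_POWER_POLICY_NUM flat
 * u32 cells: entry i belongs to group (i / SDHCI_POWER_POLICY_NUM) and
 * power policy (i % SDHCI_POWER_POLICY_NUM), as decoded above.
 */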
1773
1774static void sdhci_msm_pm_qos_parse(struct device *dev,
1775 struct sdhci_msm_pltfm_data *pdata)
1776{
1777 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1778 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1779 __func__);
1780
1781 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1782 pdata->pm_qos_data.cmdq_valid =
1783 !sdhci_msm_pm_qos_parse_latency(dev,
1784 "qcom,pm-qos-cmdq-latency-us",
1785 pdata->pm_qos_data.cpu_group_map.nr_groups,
1786 &pdata->pm_qos_data.cmdq_latency);
1787 pdata->pm_qos_data.legacy_valid =
1788 !sdhci_msm_pm_qos_parse_latency(dev,
1789 "qcom,pm-qos-legacy-latency-us",
1790 pdata->pm_qos_data.cpu_group_map.nr_groups,
1791 &pdata->pm_qos_data.latency);
1792 if (!pdata->pm_qos_data.cmdq_valid &&
1793 !pdata->pm_qos_data.legacy_valid) {
1794 /* clean-up previously allocated arrays */
1795 kfree(pdata->pm_qos_data.latency);
1796 kfree(pdata->pm_qos_data.cmdq_latency);
1797 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1798 __func__);
1799 }
1800 } else {
1801 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1802 __func__);
1803 }
1804}
1805
Asutosh Das0ef24812012-12-18 16:14:02 +05301806/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001807static
1808struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1809 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301810{
1811 struct sdhci_msm_pltfm_data *pdata = NULL;
1812 struct device_node *np = dev->of_node;
1813 u32 bus_width = 0;
1814 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301815 int clk_table_len;
1816 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301817 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301818 const char *lower_bus_speed = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301819
1820 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1821 if (!pdata) {
1822 dev_err(dev, "failed to allocate memory for platform data\n");
1823 goto out;
1824 }
1825
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301826 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1827	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
1828 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301829
Asutosh Das0ef24812012-12-18 16:14:02 +05301830 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1831 if (bus_width == 8)
1832 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1833 else if (bus_width == 4)
1834 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1835 else {
1836 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1837 pdata->mmc_bus_width = 0;
1838 }
1839
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001840 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301841 &msm_host->mmc->clk_scaling.pltfm_freq_table,
1842 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001843 pr_debug("%s: no clock scaling frequencies were supplied\n",
1844 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301845 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
1846 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
1847 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001848
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301849 /*
1850	 * A few hosts can support DDR52 mode at the same lower
1851 * system voltage corner as high-speed mode. In such cases,
1852 * it is always better to put it in DDR mode which will
1853 * improve the performance without any power impact.
1854 */
1855 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
1856 &lower_bus_speed)) {
1857 if (!strcmp(lower_bus_speed, "DDR52"))
1858 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
1859 MMC_SCALING_LOWER_DDR52_MODE;
1860 }
1861
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301862 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1863 &clk_table, &clk_table_len, 0)) {
1864 dev_err(dev, "failed parsing supported clock rates\n");
1865 goto out;
1866 }
1867 if (!clk_table || !clk_table_len) {
1868 dev_err(dev, "Invalid clock table\n");
1869 goto out;
1870 }
1871 pdata->sup_clk_table = clk_table;
1872 pdata->sup_clk_cnt = clk_table_len;
1873
Asutosh Das0ef24812012-12-18 16:14:02 +05301874 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1875 sdhci_msm_slot_reg_data),
1876 GFP_KERNEL);
1877 if (!pdata->vreg_data) {
1878 dev_err(dev, "failed to allocate memory for vreg data\n");
1879 goto out;
1880 }
1881
1882 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1883 "vdd")) {
1884 dev_err(dev, "failed parsing vdd data\n");
1885 goto out;
1886 }
1887 if (sdhci_msm_dt_parse_vreg_info(dev,
1888 &pdata->vreg_data->vdd_io_data,
1889 "vdd-io")) {
1890 dev_err(dev, "failed parsing vdd-io data\n");
1891 goto out;
1892 }
1893
1894 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1895 dev_err(dev, "failed parsing gpio data\n");
1896 goto out;
1897 }
1898
Asutosh Das0ef24812012-12-18 16:14:02 +05301899 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1900
1901 for (i = 0; i < len; i++) {
1902 const char *name = NULL;
1903
1904 of_property_read_string_index(np,
1905 "qcom,bus-speed-mode", i, &name);
1906 if (!name)
1907 continue;
1908
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001909 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1910 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1911 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1912 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1913 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301914 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1915 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1916 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1917 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1918 pdata->caps |= MMC_CAP_1_8V_DDR
1919 | MMC_CAP_UHS_DDR50;
1920 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1921 pdata->caps |= MMC_CAP_1_2V_DDR
1922 | MMC_CAP_UHS_DDR50;
1923 }
1924
1925 if (of_get_property(np, "qcom,nonremovable", NULL))
1926 pdata->nonremovable = true;
1927
Guoping Yuf7c91332014-08-20 16:56:18 +08001928 if (of_get_property(np, "qcom,nonhotplug", NULL))
1929 pdata->nonhotplug = true;
1930
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001931 pdata->largeaddressbus =
1932 of_property_read_bool(np, "qcom,large-address-bus");
1933
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001934 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1935 msm_host->mmc->wakeup_on_idle = true;
1936
Gilad Bronerc788a672015-09-08 15:39:11 +03001937 sdhci_msm_pm_qos_parse(dev, pdata);
1938
Pavan Anamula5a256df2015-10-16 14:38:28 +05301939 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1940 pdata->core_3_0v_support = true;
1941
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07001942 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
1943
Asutosh Das0ef24812012-12-18 16:14:02 +05301944 return pdata;
1945out:
1946 return NULL;
1947}
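
/*
 * Illustrative only: a few of the simpler properties consumed by
 * sdhci_msm_populate_pdata() above, with example values:
 *
 *	cd-gpios = <&msmgpio 95 GPIO_ACTIVE_LOW>;
 *	qcom,bus-width = <8>;
 *	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
 *	qcom,nonremovable;
 *	qcom,scaling-lower-bus-speed-mode = "DDR52";
 */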
1948
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301949/* Returns required bandwidth in Bytes per Sec */
1950static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1951 struct mmc_ios *ios)
1952{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301953 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1954 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1955
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301956 unsigned int bw;
1957
Sahitya Tummala2886c922013-04-03 18:03:31 +05301958 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301959 /*
1960	 * For DDR mode, the SDCC controller clock runs at twice the
1961	 * rate of the actual clock that goes to the card.
1962 */
1963 if (ios->bus_width == MMC_BUS_WIDTH_4)
1964 bw /= 2;
1965 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1966 bw /= 8;
1967
1968 return bw;
1969}
1970
1971static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1972 unsigned int bw)
1973{
1974 unsigned int *table = host->pdata->voting_data->bw_vecs;
1975 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1976 int i;
1977
1978 if (host->msm_bus_vote.is_max_bw_needed && bw)
1979 return host->msm_bus_vote.max_bw_vote;
1980
1981 for (i = 0; i < size; i++) {
1982 if (bw <= table[i])
1983 break;
1984 }
1985
1986 if (i && (i == size))
1987 i--;
1988
1989 return i;
1990}
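
/*
 * Illustrative only: with a bw_vecs table of, say,
 * { 0, 400000, 20000000, 25000000, 50000000, 100000000, 200000000 },
 * a required bandwidth of 30000000 Bytes/s selects index 4 (50000000),
 * i.e. the first vector large enough to carry the request; a request
 * beyond the last entry falls back to the last index.
 */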
1991
1992/*
1993 * This function must be called with host lock acquired.
1994 * Caller of this function should also ensure that msm bus client
1995 * handle is not null.
1996 */
1997static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1998 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301999 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302000{
2001 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2002 int rc = 0;
2003
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302004 BUG_ON(!flags);
2005
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302006 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302007 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302008 rc = msm_bus_scale_client_update_request(
2009 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302010 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302011 if (rc) {
2012 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2013 mmc_hostname(host->mmc),
2014 msm_host->msm_bus_vote.client_handle, vote, rc);
2015 goto out;
2016 }
2017 msm_host->msm_bus_vote.curr_vote = vote;
2018 }
2019out:
2020 return rc;
2021}
2022
2023/*
2024 * Internal work. Work to set 0 bandwidth for msm bus.
2025 */
2026static void sdhci_msm_bus_work(struct work_struct *work)
2027{
2028 struct sdhci_msm_host *msm_host;
2029 struct sdhci_host *host;
2030 unsigned long flags;
2031
2032 msm_host = container_of(work, struct sdhci_msm_host,
2033 msm_bus_vote.vote_work.work);
2034 host = platform_get_drvdata(msm_host->pdev);
2035
2036 if (!msm_host->msm_bus_vote.client_handle)
2037 return;
2038
2039 spin_lock_irqsave(&host->lock, flags);
2040 /* don't vote for 0 bandwidth if any request is in progress */
2041 if (!host->mrq) {
2042 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302043 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302044 } else
2045 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2046 mmc_hostname(host->mmc), __func__);
2047 spin_unlock_irqrestore(&host->lock, flags);
2048}
2049
2050/*
2051 * This function cancels any scheduled delayed work and sets the bus
2052 * vote based on bw (bandwidth) argument.
2053 */
2054static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2055 unsigned int bw)
2056{
2057 int vote;
2058 unsigned long flags;
2059 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2060 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2061
2062 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2063 spin_lock_irqsave(&host->lock, flags);
2064 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302065 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302066 spin_unlock_irqrestore(&host->lock, flags);
2067}
2068
2069#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2070
2071/* This function queues a work item that sets the bandwidth requirement to 0 */
2072static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2073{
2074 unsigned long flags;
2075 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2076 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2077
2078 spin_lock_irqsave(&host->lock, flags);
2079 if (msm_host->msm_bus_vote.min_bw_vote !=
2080 msm_host->msm_bus_vote.curr_vote)
2081 queue_delayed_work(system_wq,
2082 &msm_host->msm_bus_vote.vote_work,
2083 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2084 spin_unlock_irqrestore(&host->lock, flags);
2085}
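
/*
 * Design note on the three helpers above: dropping the bus vote to the
 * minimum is deliberately deferred by MSM_MMC_BUS_VOTING_DELAY (200 ms)
 * through the delayed work, so back-to-back requests do not bounce the
 * vote. The work bails out while a request is still in flight, and
 * sdhci_msm_bus_cancel_work_and_set_vote() cancels it whenever a new
 * vote has to take effect immediately.
 */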
2086
2087static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2088 struct platform_device *pdev)
2089{
2090 int rc = 0;
2091 struct msm_bus_scale_pdata *bus_pdata;
2092
2093 struct sdhci_msm_bus_voting_data *data;
2094 struct device *dev = &pdev->dev;
2095
2096 data = devm_kzalloc(dev,
2097 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2098 if (!data) {
2099 dev_err(&pdev->dev,
2100 "%s: failed to allocate memory\n", __func__);
2101 rc = -ENOMEM;
2102 goto out;
2103 }
2104 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2105 if (data->bus_pdata) {
2106 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2107 &data->bw_vecs, &data->bw_vecs_size, 0);
2108 if (rc) {
2109 dev_err(&pdev->dev,
2110 "%s: Failed to get bus-bw-vectors-bps\n",
2111 __func__);
2112 goto out;
2113 }
2114 host->pdata->voting_data = data;
2115 }
2116 if (host->pdata->voting_data &&
2117 host->pdata->voting_data->bus_pdata &&
2118 host->pdata->voting_data->bw_vecs &&
2119 host->pdata->voting_data->bw_vecs_size) {
2120
2121 bus_pdata = host->pdata->voting_data->bus_pdata;
2122 host->msm_bus_vote.client_handle =
2123 msm_bus_scale_register_client(bus_pdata);
2124 if (!host->msm_bus_vote.client_handle) {
2125 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
2126 rc = -EFAULT;
2127 goto out;
2128 }
2129 /* cache the vote index for minimum and maximum bandwidth */
2130 host->msm_bus_vote.min_bw_vote =
2131 sdhci_msm_bus_get_vote_for_bw(host, 0);
2132 host->msm_bus_vote.max_bw_vote =
2133 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2134 } else {
2135 devm_kfree(dev, data);
2136 }
2137
2138out:
2139 return rc;
2140}
2141
2142static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2143{
2144 if (host->msm_bus_vote.client_handle)
2145 msm_bus_scale_unregister_client(
2146 host->msm_bus_vote.client_handle);
2147}
2148
2149static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2150{
2151 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2152 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2153 struct mmc_ios *ios = &host->mmc->ios;
2154 unsigned int bw;
2155
2156 if (!msm_host->msm_bus_vote.client_handle)
2157 return;
2158
2159 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302160 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302161 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302162 } else {
2163 /*
2164 * If clock gating is enabled, then remove the vote
2165 * immediately because clocks will be disabled only
2166 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2167 * additional delay is required to remove the bus vote.
2168 */
2169#ifdef CONFIG_MMC_CLKGATE
2170 if (host->mmc->clkgate_delay)
2171 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2172 else
2173#endif
2174 sdhci_msm_bus_queue_work(host);
2175 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302176}
2177
Asutosh Das0ef24812012-12-18 16:14:02 +05302178/* Regulator utility functions */
2179static int sdhci_msm_vreg_init_reg(struct device *dev,
2180 struct sdhci_msm_reg_data *vreg)
2181{
2182 int ret = 0;
2183
2184 /* check if regulator is already initialized? */
2185 if (vreg->reg)
2186 goto out;
2187
2188 /* Get the regulator handle */
2189 vreg->reg = devm_regulator_get(dev, vreg->name);
2190 if (IS_ERR(vreg->reg)) {
2191 ret = PTR_ERR(vreg->reg);
2192 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2193 __func__, vreg->name, ret);
2194 goto out;
2195 }
2196
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302197 if (regulator_count_voltages(vreg->reg) > 0) {
2198 vreg->set_voltage_sup = true;
2199 /* sanity check */
2200 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2201 pr_err("%s: %s invalid constraints specified\n",
2202 __func__, vreg->name);
2203 ret = -EINVAL;
2204 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302205 }
2206
2207out:
2208 return ret;
2209}
2210
2211static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2212{
2213 if (vreg->reg)
2214 devm_regulator_put(vreg->reg);
2215}
2216
2217static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2218 *vreg, int uA_load)
2219{
2220 int ret = 0;
2221
2222 /*
2223	 * Regulators that do not support regulator_set_voltage also
2224	 * do not support regulator_set_load (set_optimum_mode).
2225 */
2226 if (vreg->set_voltage_sup) {
2227 ret = regulator_set_load(vreg->reg, uA_load);
2228 if (ret < 0)
2229 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2230 __func__, vreg->name, uA_load, ret);
2231 else
2232 /*
2233 * regulator_set_load() can return non zero
2234 * value even for success case.
2235 */
2236 ret = 0;
2237 }
2238 return ret;
2239}
2240
2241static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2242 int min_uV, int max_uV)
2243{
2244 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302245 if (vreg->set_voltage_sup) {
2246 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2247 if (ret) {
2248 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302249 __func__, vreg->name, min_uV, max_uV, ret);
2250 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302251 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302252
2253 return ret;
2254}
2255
2256static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2257{
2258 int ret = 0;
2259
2260 /* Put regulator in HPM (high power mode) */
2261 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2262 if (ret < 0)
2263 return ret;
2264
2265 if (!vreg->is_enabled) {
2266 /* Set voltage level */
2267 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2268 vreg->high_vol_level);
2269 if (ret)
2270 return ret;
2271 }
2272 ret = regulator_enable(vreg->reg);
2273 if (ret) {
2274 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2275 __func__, vreg->name, ret);
2276 return ret;
2277 }
2278 vreg->is_enabled = true;
2279 return ret;
2280}
2281
2282static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2283{
2284 int ret = 0;
2285
2286 /* Never disable regulator marked as always_on */
2287 if (vreg->is_enabled && !vreg->is_always_on) {
2288 ret = regulator_disable(vreg->reg);
2289 if (ret) {
2290 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2291 __func__, vreg->name, ret);
2292 goto out;
2293 }
2294 vreg->is_enabled = false;
2295
2296 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2297 if (ret < 0)
2298 goto out;
2299
2300 /* Set min. voltage level to 0 */
2301 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2302 if (ret)
2303 goto out;
2304 } else if (vreg->is_enabled && vreg->is_always_on) {
2305 if (vreg->lpm_sup) {
2306 /* Put always_on regulator in LPM (low power mode) */
2307 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2308 vreg->lpm_uA);
2309 if (ret < 0)
2310 goto out;
2311 }
2312 }
2313out:
2314 return ret;
2315}
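
/*
 * Summary of the regulator sequencing above: enable puts the regulator
 * into HPM (hpm_uA load), programs high_vol_level and then enables it;
 * disable fully turns off normal regulators (dropping the load vote and
 * the minimum voltage), while always-on regulators are only moved to
 * LPM (lpm_uA) when qcom,<name>-lpm-sup was specified in DT.
 */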
2316
2317static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2318 bool enable, bool is_init)
2319{
2320 int ret = 0, i;
2321 struct sdhci_msm_slot_reg_data *curr_slot;
2322 struct sdhci_msm_reg_data *vreg_table[2];
2323
2324 curr_slot = pdata->vreg_data;
2325 if (!curr_slot) {
2326		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2327 __func__);
2328 goto out;
2329 }
2330
2331 vreg_table[0] = curr_slot->vdd_data;
2332 vreg_table[1] = curr_slot->vdd_io_data;
2333
2334 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2335 if (vreg_table[i]) {
2336 if (enable)
2337 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2338 else
2339 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2340 if (ret)
2341 goto out;
2342 }
2343 }
2344out:
2345 return ret;
2346}
2347
2348/*
2349 * Reset vreg by ensuring it is off during probe. The enable
2350 * call below is needed to balance the subsequent disable.
2351 */
2352static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2353{
2354 int ret;
2355
2356 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2357 if (ret)
2358 return ret;
2359 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2360 return ret;
2361}
2362
2363/* This init function should be called only once for each SDHC slot */
2364static int sdhci_msm_vreg_init(struct device *dev,
2365 struct sdhci_msm_pltfm_data *pdata,
2366 bool is_init)
2367{
2368 int ret = 0;
2369 struct sdhci_msm_slot_reg_data *curr_slot;
2370 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2371
2372 curr_slot = pdata->vreg_data;
2373 if (!curr_slot)
2374 goto out;
2375
2376 curr_vdd_reg = curr_slot->vdd_data;
2377 curr_vdd_io_reg = curr_slot->vdd_io_data;
2378
2379 if (!is_init)
2380 /* Deregister all regulators from regulator framework */
2381 goto vdd_io_reg_deinit;
2382
2383 /*
2384 * Get the regulator handle from voltage regulator framework
2385 * and then try to set the voltage level for the regulator
2386 */
2387 if (curr_vdd_reg) {
2388 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2389 if (ret)
2390 goto out;
2391 }
2392 if (curr_vdd_io_reg) {
2393 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2394 if (ret)
2395 goto vdd_reg_deinit;
2396 }
2397 ret = sdhci_msm_vreg_reset(pdata);
2398 if (ret)
2399 dev_err(dev, "vreg reset failed (%d)\n", ret);
2400 goto out;
2401
2402vdd_io_reg_deinit:
2403 if (curr_vdd_io_reg)
2404 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2405vdd_reg_deinit:
2406 if (curr_vdd_reg)
2407 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2408out:
2409 return ret;
2410}
2411
2412
2413static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2414 enum vdd_io_level level,
2415 unsigned int voltage_level)
2416{
2417 int ret = 0;
2418 int set_level;
2419 struct sdhci_msm_reg_data *vdd_io_reg;
2420
2421 if (!pdata->vreg_data)
2422 return ret;
2423
2424 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2425 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2426 switch (level) {
2427 case VDD_IO_LOW:
2428 set_level = vdd_io_reg->low_vol_level;
2429 break;
2430 case VDD_IO_HIGH:
2431 set_level = vdd_io_reg->high_vol_level;
2432 break;
2433 case VDD_IO_SET_LEVEL:
2434 set_level = voltage_level;
2435 break;
2436 default:
2437 pr_err("%s: invalid argument level = %d",
2438 __func__, level);
2439 ret = -EINVAL;
2440 return ret;
2441 }
2442 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2443 set_level);
2444 }
2445 return ret;
2446}
2447
Ritesh Harjani42876f42015-11-17 17:46:51 +05302448/*
2449 * Acquire spin-lock host->lock before calling this function
2450 */
2451static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2452 bool enable)
2453{
2454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2455 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2456
2457 if (enable && !msm_host->is_sdiowakeup_enabled)
2458 enable_irq(msm_host->pdata->sdiowakeup_irq);
2459 else if (!enable && msm_host->is_sdiowakeup_enabled)
2460 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2461 else
2462 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2463 __func__, enable, msm_host->is_sdiowakeup_enabled);
2464 msm_host->is_sdiowakeup_enabled = enable;
2465}
2466
2467static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2468{
2469 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302470 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2471 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2472
Ritesh Harjani42876f42015-11-17 17:46:51 +05302473 unsigned long flags;
2474
2475 pr_debug("%s: irq (%d) received\n", __func__, irq);
2476
2477 spin_lock_irqsave(&host->lock, flags);
2478 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2479 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302480 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302481
2482 return IRQ_HANDLED;
2483}
2484
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302485void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2486{
2487 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2488 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302489 const struct sdhci_msm_offset *msm_host_offset =
2490 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302491
2492 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2493 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302494 sdhci_msm_readl_relaxed(host,
2495 msm_host_offset->CORE_PWRCTL_STATUS),
2496 sdhci_msm_readl_relaxed(host,
2497 msm_host_offset->CORE_PWRCTL_MASK),
2498 sdhci_msm_readl_relaxed(host,
2499 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302500}
2501
Asutosh Das0ef24812012-12-18 16:14:02 +05302502static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2503{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002504 struct sdhci_host *host = (struct sdhci_host *)data;
2505 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2506 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302507 const struct sdhci_msm_offset *msm_host_offset =
2508 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302509 u8 irq_status = 0;
2510 u8 irq_ack = 0;
2511 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302512 int pwr_state = 0, io_level = 0;
2513 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302514 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302515
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302516 irq_status = sdhci_msm_readb_relaxed(host,
2517 msm_host_offset->CORE_PWRCTL_STATUS);
2518
Asutosh Das0ef24812012-12-18 16:14:02 +05302519 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2520 mmc_hostname(msm_host->mmc), irq, irq_status);
2521
2522 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302523 sdhci_msm_writeb_relaxed(irq_status, host,
2524 msm_host_offset->CORE_PWRCTL_CLEAR);
2525
Asutosh Das0ef24812012-12-18 16:14:02 +05302526 /*
2527 * SDHC has core_mem and hc_mem device memory and these memory
2528 * addresses do not fall within 1KB region. Hence, any update to
2529 * core_mem address space would require an mb() to ensure this gets
2530 * completed before its next update to registers within hc_mem.
2531 */
2532 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302533 /*
2534 * There is a rare HW scenario where the first clear pulse could be
2535 * lost when actual reset and clear/read of status register is
2536 * happening at a time. Hence, retry for at least 10 times to make
2537 * sure status register is cleared. Otherwise, this will result in
2538 * a spurious power IRQ resulting in system instability.
2539 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302540 while (irq_status & sdhci_msm_readb_relaxed(host,
2541 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302542 if (retry == 0) {
2543 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2544 mmc_hostname(host->mmc), irq_status);
2545 sdhci_msm_dump_pwr_ctrl_regs(host);
2546 BUG_ON(1);
2547 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302548 sdhci_msm_writeb_relaxed(irq_status, host,
2549 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302550 retry--;
2551 udelay(10);
2552 }
2553 if (likely(retry < 10))
2554 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2555 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302556
2557 /* Handle BUS ON/OFF*/
2558 if (irq_status & CORE_PWRCTL_BUS_ON) {
2559 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302560 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302561 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302562 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2563 VDD_IO_HIGH, 0);
2564 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302565 if (ret)
2566 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2567 else
2568 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302569
2570 pwr_state = REQ_BUS_ON;
2571 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302572 }
2573 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2574 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302575 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302576 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302577 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2578 VDD_IO_LOW, 0);
2579 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302580 if (ret)
2581 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2582 else
2583 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302584
2585 pwr_state = REQ_BUS_OFF;
2586 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302587 }
2588 /* Handle IO LOW/HIGH */
2589 if (irq_status & CORE_PWRCTL_IO_LOW) {
2590 /* Switch voltage Low */
2591 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2592 if (ret)
2593 irq_ack |= CORE_PWRCTL_IO_FAIL;
2594 else
2595 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302596
2597 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302598 }
2599 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2600 /* Switch voltage High */
2601 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2602 if (ret)
2603 irq_ack |= CORE_PWRCTL_IO_FAIL;
2604 else
2605 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302606
2607 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302608 }
2609
2610 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302611 sdhci_msm_writeb_relaxed(irq_ack, host,
2612 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302613 /*
2614 * SDHC has core_mem and hc_mem device memory and these memory
2615 * addresses do not fall within 1KB region. Hence, any update to
2616 * core_mem address space would require an mb() to ensure this gets
2617 * completed before its next update to registers within hc_mem.
2618 */
2619 mb();
2620
Krishna Konda46fd1432014-10-30 21:13:27 -07002621 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302622 writel_relaxed((readl_relaxed(host->ioaddr +
2623 msm_host_offset->CORE_VENDOR_SPEC) &
2624 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2625 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002626 else if ((io_level & REQ_IO_LOW) ||
2627 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302628 writel_relaxed((readl_relaxed(host->ioaddr +
2629 msm_host_offset->CORE_VENDOR_SPEC) |
2630 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2631 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002632 mb();
2633
Asutosh Das0ef24812012-12-18 16:14:02 +05302634 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2635 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302636 spin_lock_irqsave(&host->lock, flags);
2637 if (pwr_state)
2638 msm_host->curr_pwr_state = pwr_state;
2639 if (io_level)
2640 msm_host->curr_io_level = io_level;
2641 complete(&msm_host->pwr_irq_completion);
2642 spin_unlock_irqrestore(&host->lock, flags);
2643
Asutosh Das0ef24812012-12-18 16:14:02 +05302644 return IRQ_HANDLED;
2645}
2646
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302647static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302648show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2649{
2650 struct sdhci_host *host = dev_get_drvdata(dev);
2651 int poll;
2652 unsigned long flags;
2653
2654 spin_lock_irqsave(&host->lock, flags);
2655 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2656 spin_unlock_irqrestore(&host->lock, flags);
2657
2658 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2659}
2660
2661static ssize_t
2662store_polling(struct device *dev, struct device_attribute *attr,
2663 const char *buf, size_t count)
2664{
2665 struct sdhci_host *host = dev_get_drvdata(dev);
2666 int value;
2667 unsigned long flags;
2668
2669 if (!kstrtou32(buf, 0, &value)) {
2670 spin_lock_irqsave(&host->lock, flags);
2671 if (value) {
2672 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2673 mmc_detect_change(host->mmc, 0);
2674 } else {
2675 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2676 }
2677 spin_unlock_irqrestore(&host->lock, flags);
2678 }
2679 return count;
2680}
2681
2682static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302683show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2684 char *buf)
2685{
2686 struct sdhci_host *host = dev_get_drvdata(dev);
2687 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2688 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2689
2690 return snprintf(buf, PAGE_SIZE, "%u\n",
2691 msm_host->msm_bus_vote.is_max_bw_needed);
2692}
2693
2694static ssize_t
2695store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2696 const char *buf, size_t count)
2697{
2698 struct sdhci_host *host = dev_get_drvdata(dev);
2699 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2700 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2701 uint32_t value;
2702 unsigned long flags;
2703
2704 if (!kstrtou32(buf, 0, &value)) {
2705 spin_lock_irqsave(&host->lock, flags);
2706 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2707 spin_unlock_irqrestore(&host->lock, flags);
2708 }
2709 return count;
2710}
2711
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302712static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302713{
2714 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2715 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302716 const struct sdhci_msm_offset *msm_host_offset =
2717 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302718 unsigned long flags;
2719 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302720 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302721
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302722 spin_lock_irqsave(&host->lock, flags);
2723 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2724 mmc_hostname(host->mmc), __func__, req_type,
2725 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302726 if (!msm_host->mci_removed)
2727 io_sig_sts = sdhci_msm_readl_relaxed(host,
2728 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302729
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302730 /*
2731 * The IRQ for request type IO High/Low will be generated when -
2732 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2733 * 2. If 1 is true and when there is a state change in 1.8V enable
2734 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2735 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2736 * layer tries to set it to 3.3V before card detection happens, the
2737 * IRQ doesn't get triggered as there is no state change in this bit.
2738 * The driver already handles this case by changing the IO voltage
2739 * level to high as part of controller power up sequence. Hence, check
2740 * for host->pwr to handle a case where IO voltage high request is
2741 * issued even before controller power up.
2742 */
2743 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2744 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2745 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2746 pr_debug("%s: do not wait for power IRQ that never comes\n",
2747 mmc_hostname(host->mmc));
2748 spin_unlock_irqrestore(&host->lock, flags);
2749 return;
2750 }
2751 }
2752
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302753 if ((req_type & msm_host->curr_pwr_state) ||
2754 (req_type & msm_host->curr_io_level))
2755 done = true;
2756 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302757
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302758 /*
2759 * This is needed here to hanlde a case where IRQ gets
2760 * triggered even before this function is called so that
2761 * x->done counter of completion gets reset. Otherwise,
2762 * next call to wait_for_completion returns immediately
2763 * without actually waiting for the IRQ to be handled.
2764 */
2765 if (done)
2766 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302767 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
2768 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
2769 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2770 mmc_hostname(host->mmc), req_type);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302771
2772 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2773 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302774}
2775
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002776static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2777{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302778 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2779 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2780 const struct sdhci_msm_offset *msm_host_offset =
2781 msm_host->offset;
2782 u32 config = readl_relaxed(host->ioaddr +
2783 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302784
2785 if (enable) {
2786 config |= CORE_CDR_EN;
2787 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302788 writel_relaxed(config, host->ioaddr +
2789 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302790 } else {
2791 config &= ~CORE_CDR_EN;
2792 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302793 writel_relaxed(config, host->ioaddr +
2794 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302795 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002796}
2797
Asutosh Das648f9d12013-01-10 21:11:04 +05302798static unsigned int sdhci_msm_max_segs(void)
2799{
2800 return SDHCI_MSM_MAX_SEGMENTS;
2801}
2802
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302803static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302804{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302805 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2806 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302807
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302808 return msm_host->pdata->sup_clk_table[0];
2809}
2810
2811static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2812{
2813 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2814 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2815 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2816
2817 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2818}
2819
2820static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2821 u32 req_clk)
2822{
2823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2824 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2825 unsigned int sel_clk = -1;
2826 unsigned char cnt;
2827
2828 if (req_clk < sdhci_msm_get_min_clock(host)) {
2829 sel_clk = sdhci_msm_get_min_clock(host);
2830 return sel_clk;
2831 }
2832
2833 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2834 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2835 break;
2836 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2837 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2838 break;
2839 } else {
2840 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2841 }
2842 }
2843 return sel_clk;
2844}
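
/*
 * Illustrative only: with qcom,clk-rates = <400000 25000000 50000000
 * 100000000 200000000> (example values), a request for 150 MHz returns
 * 100 MHz, the highest supported rate not exceeding the request, while
 * any request below 400 kHz is clamped up to the minimum rate.
 */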
2845
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302846static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2847{
2848 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2849 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2850 int rc = 0;
2851
2852 if (atomic_read(&msm_host->controller_clock))
2853 return 0;
2854
2855 sdhci_msm_bus_voting(host, 1);
2856
2857 if (!IS_ERR(msm_host->pclk)) {
2858 rc = clk_prepare_enable(msm_host->pclk);
2859 if (rc) {
2860 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2861 mmc_hostname(host->mmc), __func__, rc);
2862 goto remove_vote;
2863 }
2864 }
2865
2866 rc = clk_prepare_enable(msm_host->clk);
2867 if (rc) {
2868 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2869 mmc_hostname(host->mmc), __func__, rc);
2870 goto disable_pclk;
2871 }
2872
2873 atomic_set(&msm_host->controller_clock, 1);
2874 pr_debug("%s: %s: enabled controller clock\n",
2875 mmc_hostname(host->mmc), __func__);
2876 goto out;
2877
2878disable_pclk:
2879 if (!IS_ERR(msm_host->pclk))
2880 clk_disable_unprepare(msm_host->pclk);
2881remove_vote:
2882 if (msm_host->msm_bus_vote.client_handle)
2883 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2884out:
2885 return rc;
2886}
2887
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302888static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
2889{
2890 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2891 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302892
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302893 if (atomic_read(&msm_host->controller_clock)) {
2894 if (!IS_ERR(msm_host->clk))
2895 clk_disable_unprepare(msm_host->clk);
2896 if (!IS_ERR(msm_host->pclk))
2897 clk_disable_unprepare(msm_host->pclk);
2898 if (!IS_ERR(msm_host->ice_clk))
2899 clk_disable_unprepare(msm_host->ice_clk);
2900 sdhci_msm_bus_voting(host, 0);
2901 atomic_set(&msm_host->controller_clock, 0);
2902 pr_debug("%s: %s: disabled controller clock\n",
2903 mmc_hostname(host->mmc), __func__);
2904 }
2905}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302906
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302907static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2908{
2909 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2910 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2911 int rc = 0;
2912
2913 if (enable && !atomic_read(&msm_host->clks_on)) {
2914 pr_debug("%s: request to enable clocks\n",
2915 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302916
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302917 /*
2918		 * after controller clocks are enabled; update the bus vote
2919		 * in that case.
2920 * in such case.
2921 */
2922 if (atomic_read(&msm_host->controller_clock))
2923 sdhci_msm_bus_voting(host, 1);
2924
2925 rc = sdhci_msm_enable_controller_clock(host);
2926 if (rc)
2927 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302928
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302929 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2930 rc = clk_prepare_enable(msm_host->bus_clk);
2931 if (rc) {
2932 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2933 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302934 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302935 }
2936 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002937 if (!IS_ERR(msm_host->ff_clk)) {
2938 rc = clk_prepare_enable(msm_host->ff_clk);
2939 if (rc) {
2940 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2941 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302942 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002943 }
2944 }
2945 if (!IS_ERR(msm_host->sleep_clk)) {
2946 rc = clk_prepare_enable(msm_host->sleep_clk);
2947 if (rc) {
2948 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2949 mmc_hostname(host->mmc), __func__, rc);
2950 goto disable_ff_clk;
2951 }
2952 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302953 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302954
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302955 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302956 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2957 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05302958 /*
2959 * During 1.8V signal switching the clock source must
2960 * still be ON as it requires accessing SDHC
2961 * registers (SDHCI Host Control2 register bit 3 must
2962 * be written and polled after stopping the SDCLK).
2963 */
2964 if (host->mmc->card_clock_off)
2965 return 0;
2966 pr_debug("%s: request to disable clocks\n",
2967 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002968 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2969 clk_disable_unprepare(msm_host->sleep_clk);
2970 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2971 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302972 clk_disable_unprepare(msm_host->clk);
2973 if (!IS_ERR(msm_host->pclk))
2974 clk_disable_unprepare(msm_host->pclk);
2975 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2976 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302977
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302978 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302979 sdhci_msm_bus_voting(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302980 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302981 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302982 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002983disable_ff_clk:
2984 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2985 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302986disable_bus_clk:
2987 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2988 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302989disable_controller_clk:
2990 if (!IS_ERR_OR_NULL(msm_host->clk))
2991 clk_disable_unprepare(msm_host->clk);
2992 if (!IS_ERR_OR_NULL(msm_host->pclk))
2993 clk_disable_unprepare(msm_host->pclk);
2994 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302995remove_vote:
2996 if (msm_host->msm_bus_vote.client_handle)
2997 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302998out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302999 return rc;
3000}
3001
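/*
 * sdhci_msm_set_clock - the .set_clock host operation. For clock == 0 the
 * host clocks are gated (with CLK_PWRSAVE cleared so the clock is not
 * auto-gated during the next initialization). Otherwise the clocks are
 * enabled, CLK_PWRSAVE is adjusted according to whether the card allows
 * clock gating, the supported rate is looked up (doubled internally for
 * DDR50/DDR52/HS400), the MCLK source and HC_SELECT_IN are programmed for
 * HS400 (polling the DLL lock where required), and the rate is finally
 * applied with clk_set_rate() before handing over to sdhci_set_clock().
 */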
3002static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3003{
3004 int rc;
3005 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3006 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303007 const struct sdhci_msm_offset *msm_host_offset =
3008 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003009 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303010 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003011 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303012 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303013
3014 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303015 /*
3016 * disable pwrsave to ensure clock is not auto-gated until
3017 * the rate is >400KHz (initialization complete).
3018 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303019 writel_relaxed(readl_relaxed(host->ioaddr +
3020 msm_host_offset->CORE_VENDOR_SPEC) &
3021 ~CORE_CLK_PWRSAVE, host->ioaddr +
3022 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303023 sdhci_msm_prepare_clocks(host, false);
3024 host->clock = clock;
3025 goto out;
3026 }
3027
3028 rc = sdhci_msm_prepare_clocks(host, true);
3029 if (rc)
3030 goto out;
3031
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303032 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3033 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303034 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003035 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303036 writel_relaxed(readl_relaxed(host->ioaddr +
3037 msm_host_offset->CORE_VENDOR_SPEC)
3038 | CORE_CLK_PWRSAVE, host->ioaddr +
3039 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303040 /*
3041 * Disable pwrsave for a newly added card if it doesn't allow
3042 * clock gating.
3043 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003044 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303045 writel_relaxed(readl_relaxed(host->ioaddr +
3046 msm_host_offset->CORE_VENDOR_SPEC)
3047 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3048 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303049
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303050 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003051 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003052 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003053 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303054 /*
3055 * The SDHC requires the internal clock frequency to be double the
3056 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003057 * uses the faster clock (100/400MHz) for some of its parts and
 3058 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303059 */
3060 ddr_clock = clock * 2;
3061 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3062 ddr_clock);
3063 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003064
3065 /*
3066 * In general all timing modes are controlled via UHS mode select in
3067 * Host Control2 register. eMMC-specific HS200/HS400 don't have
3068 * their respective modes defined here, hence we use these values:
3069 *
3070 * HS200 - SDR104 (Since they both are equivalent in functionality)
3071 * HS400 - This involves multiple configurations
3072 * Initially SDR104 - when tuning is required as HS200
3073 * Then when switching to DDR @ 400MHz (HS400) we use
3074 * the vendor specific HC_SELECT_IN to control the mode.
3075 *
3076 * In addition to controlling the modes we also need to select the
3077 * correct input clock for DLL depending on the mode.
3078 *
3079 * HS400 - divided clock (free running MCLK/2)
3080 * All other modes - default (free running MCLK)
3081 */
3082 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3083 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303084 writel_relaxed(((readl_relaxed(host->ioaddr +
3085 msm_host_offset->CORE_VENDOR_SPEC)
3086 & ~CORE_HC_MCLK_SEL_MASK)
3087 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3088 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003089 /*
3090 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3091 * register
3092 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303093 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003094 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303095 msm_host->enhanced_strobe)) &&
3096 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003097 /*
3098 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3099 * field in VENDOR_SPEC_FUNC
3100 */
3101 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303102 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003103 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303104 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3105 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003106 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003107 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3108 /*
3109 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3110 * CORE_DLL_STATUS to be set. This should get set
3111 * within 15 us at 200 MHz.
3112 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303113 rc = readl_poll_timeout(host->ioaddr +
3114 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003115 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3116 CORE_DDR_DLL_LOCK)), 10, 1000);
3117 if (rc == -ETIMEDOUT)
3118 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3119 mmc_hostname(host->mmc),
3120 dll_lock);
3121 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003122 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003123 if (!msm_host->use_cdclp533)
3124 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3125 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303126 msm_host_offset->CORE_VENDOR_SPEC3)
3127 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3128 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003129
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003130 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303131 writel_relaxed(((readl_relaxed(host->ioaddr +
3132 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003133 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303134 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3135 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003136
3137 /*
3138 * Disable HC_SELECT_IN to be able to use the UHS mode select
3139 * configuration from Host Control2 register for all other
3140 * modes.
3141 *
3142 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3143 * in VENDOR_SPEC_FUNC
3144 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303145 writel_relaxed((readl_relaxed(host->ioaddr +
3146 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003147 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303148 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3149 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003150 }
3151 mb();
3152
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303153 if (sup_clock != msm_host->clk_rate) {
3154 pr_debug("%s: %s: setting clk rate to %u\n",
3155 mmc_hostname(host->mmc), __func__, sup_clock);
3156 rc = clk_set_rate(msm_host->clk, sup_clock);
3157 if (rc) {
3158 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3159 mmc_hostname(host->mmc), __func__,
3160 sup_clock, rc);
3161 goto out;
3162 }
3163 msm_host->clk_rate = sup_clock;
3164 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303165 /*
3166 * Update the bus vote in case of frequency change due to
3167 * clock scaling.
3168 */
3169 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303170 }
3171out:
3172 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303173}
3174
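/*
 * sdhci_msm_set_uhs_signaling - program the UHS mode select field in Host
 * Control2 for the requested timing (HS400/HS200 are driven as SDR104,
 * DDR52 as DDR50). At or below 100MHz the mode select is cleared and the
 * DLL is reset and powered down so that tuning can be skipped; the
 * calibration_done flag is cleared so HS400 calibration reruns later.
 */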
Sahitya Tummala14613432013-03-21 11:13:25 +05303175static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3176 unsigned int uhs)
3177{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003178 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3179 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303180 const struct sdhci_msm_offset *msm_host_offset =
3181 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303182 u16 ctrl_2;
3183
3184 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3185 /* Select Bus Speed Mode for host */
3186 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003187 if ((uhs == MMC_TIMING_MMC_HS400) ||
3188 (uhs == MMC_TIMING_MMC_HS200) ||
3189 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303190 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3191 else if (uhs == MMC_TIMING_UHS_SDR12)
3192 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3193 else if (uhs == MMC_TIMING_UHS_SDR25)
3194 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3195 else if (uhs == MMC_TIMING_UHS_SDR50)
3196 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003197 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3198 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303199 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303200 /*
3201 * When the clock frequency is less than 100MHz, the feedback clock must be
3202 * provided and DLL must not be used so that tuning can be skipped. To
3203 * provide feedback clock, the mode selection can be any value less
3204 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3205 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003206 if (host->clock <= CORE_FREQ_100MHZ) {
3207 if ((uhs == MMC_TIMING_MMC_HS400) ||
3208 (uhs == MMC_TIMING_MMC_HS200) ||
3209 (uhs == MMC_TIMING_UHS_SDR104))
3210 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303211
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003212 /*
3213 * Make sure DLL is disabled when not required
3214 *
3215 * Write 1 to DLL_RST bit of DLL_CONFIG register
3216 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303217 writel_relaxed((readl_relaxed(host->ioaddr +
3218 msm_host_offset->CORE_DLL_CONFIG)
3219 | CORE_DLL_RST), host->ioaddr +
3220 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003221
3222 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303223 writel_relaxed((readl_relaxed(host->ioaddr +
3224 msm_host_offset->CORE_DLL_CONFIG)
3225 | CORE_DLL_PDN), host->ioaddr +
3226 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003227 mb();
3228
3229 /*
3230 * The DLL needs to be restored and CDCLP533 recalibrated
3231 * when the clock frequency is set back to 400MHz.
3232 */
3233 msm_host->calibration_done = false;
3234 }
3235
3236 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3237 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303238 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3239
3240}
3241
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003242#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003243#define DRV_NAME "cmdq-host"
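/*
 * sdhci_msm_cmdq_dump_debug_ram - dump the CMDQ debug RAM (wrap-around,
 * overlap and 16 data words) for post-mortem analysis. The debug RAM
 * registers sit at an extra 0x48 offset on cores older than 4.2.0.
 */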
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303244static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003245{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303246 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303247 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3248 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303249 const struct sdhci_msm_offset *msm_host_offset =
3250 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303251 struct cmdq_host *cq_host = host->cq_host;
3252
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303253 u32 version = sdhci_msm_readl_relaxed(host,
3254 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003255 u16 minor = version & CORE_VERSION_TARGET_MASK;
3256 /* registers offset changed starting from 4.2.0 */
3257 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3258
3259 pr_err("---- Debug RAM dump ----\n");
3260 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3261 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3262 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3263
3264 while (i < 16) {
3265 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3266 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3267 i++;
3268 }
3269 pr_err("-------------------------\n");
3270}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303271
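/*
 * sdhci_msm_dump_vendor_regs - print the vendor-specific state used for
 * debugging: MCI data/FIFO counters, DLL config/status, the vendor spec
 * and ADMA error registers, and up to MAX_TEST_BUS (60) test bus readings
 * captured by walking the tbsel/tbsel2 selectors described below.
 */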
3272void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3273{
3274 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3275 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303276 const struct sdhci_msm_offset *msm_host_offset =
3277 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303278 int tbsel, tbsel2;
3279 int i, index = 0;
3280 u32 test_bus_val = 0;
3281 u32 debug_reg[MAX_TEST_BUS] = {0};
3282
3283 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003284 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303285 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003286
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303287 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3288 sdhci_msm_readl_relaxed(host,
3289 msm_host_offset->CORE_MCI_DATA_CNT),
3290 sdhci_msm_readl_relaxed(host,
3291 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303292 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303293 sdhci_msm_readl_relaxed(host,
3294 msm_host_offset->CORE_MCI_DATA_CNT),
3295 sdhci_msm_readl_relaxed(host,
3296 msm_host_offset->CORE_MCI_FIFO_CNT),
3297 sdhci_msm_readl_relaxed(host,
3298 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303299 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303300 readl_relaxed(host->ioaddr +
3301 msm_host_offset->CORE_DLL_CONFIG),
3302 readl_relaxed(host->ioaddr +
3303 msm_host_offset->CORE_DLL_STATUS),
3304 sdhci_msm_readl_relaxed(host,
3305 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303306 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303307 readl_relaxed(host->ioaddr +
3308 msm_host_offset->CORE_VENDOR_SPEC),
3309 readl_relaxed(host->ioaddr +
3310 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3311 readl_relaxed(host->ioaddr +
3312 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303313 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303314 readl_relaxed(host->ioaddr +
3315 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303316
3317 /*
3318 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3319 * of CORE_TESTBUS_CONFIG register.
3320 *
3321 * To select test bus 0 to 7 use tbsel and to select any test bus
3322 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3323 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3324 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3325 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003326 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303327 for (tbsel = 0; tbsel < 8; tbsel++) {
3328 if (index >= MAX_TEST_BUS)
3329 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303330 test_bus_val =
3331 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3332 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3333 sdhci_msm_writel_relaxed(test_bus_val, host,
3334 msm_host_offset->CORE_TESTBUS_CONFIG);
3335 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3336 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303337 }
3338 }
3339 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3340 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3341 i, i + 3, debug_reg[i], debug_reg[i+1],
3342 debug_reg[i+2], debug_reg[i+3]);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003343}
3344
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303345/*
3346 * sdhci_msm_enhanced_strobe_mask :-
3347 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3348 * SW should write 3 to
3349 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3350 * The default reset value of this register is 2.
3351 */
3352static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3353{
3354 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3355 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303356 const struct sdhci_msm_offset *msm_host_offset =
3357 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303358
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303359 if (!msm_host->enhanced_strobe ||
3360 !mmc_card_strobe(msm_host->mmc->card)) {
3361 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303362 mmc_hostname(host->mmc));
3363 return;
3364 }
3365
3366 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303367 writel_relaxed((readl_relaxed(host->ioaddr +
3368 msm_host_offset->CORE_VENDOR_SPEC3)
3369 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3370 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303371 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303372 writel_relaxed((readl_relaxed(host->ioaddr +
3373 msm_host_offset->CORE_VENDOR_SPEC3)
3374 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3375 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303376 }
3377}
3378
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003379static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3380{
3381 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3382 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303383 const struct sdhci_msm_offset *msm_host_offset =
3384 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003385
3386 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303387 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3388 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003389 } else {
3390 u32 value;
3391
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303392 value = sdhci_msm_readl_relaxed(host,
3393 msm_host_offset->CORE_TESTBUS_CONFIG);
3394 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3395 sdhci_msm_writel_relaxed(value, host,
3396 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003397 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303398}
3399
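/*
 * sdhci_msm_reset_workaround - on enable, request a soft reset via
 * HC_SW_RST_REQ and poll (up to ~100ms) for the bit to clear; if it does
 * not, set HC_SW_RST_WAIT_IDLE_DIS so the reset no longer waits for
 * pending AXI transfers. On disable, the wait-idle-disable bit is cleared.
 */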
Pavan Anamula691dd592015-08-25 16:11:20 +05303400void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3401{
3402 u32 vendor_func2;
3403 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303404 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3405 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3406 const struct sdhci_msm_offset *msm_host_offset =
3407 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303408
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303409 vendor_func2 = readl_relaxed(host->ioaddr +
3410 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303411
3412 if (enable) {
3413 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303414 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303415 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303416 while (readl_relaxed(host->ioaddr +
3417 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303418 if (timeout == 0) {
3419 pr_info("%s: Applying wait idle disable workaround\n",
3420 mmc_hostname(host->mmc));
3421 /*
3422 * Apply the reset workaround to not wait for
3423 * pending data transfers on AXI before
3424 * resetting the controller. This could be
3425 * risky if the transfers were stuck on the
3426 * AXI bus.
3427 */
3428 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303429 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303430 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303431 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3432 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303433 host->reset_wa_t = ktime_get();
3434 return;
3435 }
3436 timeout--;
3437 udelay(10);
3438 }
3439 pr_info("%s: wait for SW_RST_REQ completed successfully\n",
3440 mmc_hostname(host->mmc));
3441 } else {
3442 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303443 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303444 }
3445}
3446
Gilad Broner44445992015-09-29 16:05:39 +03003447static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3448{
3449 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303450 container_of(work, struct sdhci_msm_pm_qos_irq,
3451 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003452
3453 if (atomic_read(&pm_qos_irq->counter))
3454 return;
3455
3456 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3457 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3458}
3459
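/*
 * IRQ PM QoS voting: sdhci_msm_pm_qos_irq_vote() takes a reference and
 * applies the latency for the current power policy; the matching unvote
 * either restores PM_QOS_DEFAULT_VALUE immediately or, when async, defers
 * it through unvote_work by QOS_REMOVE_DELAY_MS so back-to-back requests
 * do not thrash the vote.
 */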
3460void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3461{
3462 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3463 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3464 struct sdhci_msm_pm_qos_latency *latency =
3465 &msm_host->pdata->pm_qos_data.irq_latency;
3466 int counter;
3467
3468 if (!msm_host->pm_qos_irq.enabled)
3469 return;
3470
3471 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3472 /* Make sure to update the voting in case power policy has changed */
3473 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3474 && counter > 1)
3475 return;
3476
Asutosh Das36c2e922015-12-01 12:19:58 +05303477 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003478 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3479 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3480 msm_host->pm_qos_irq.latency);
3481}
3482
3483void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3484{
3485 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3486 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3487 int counter;
3488
3489 if (!msm_host->pm_qos_irq.enabled)
3490 return;
3491
Subhash Jadavani4d813902015-10-15 12:16:43 -07003492 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3493 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3494 } else {
3495 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3496 return;
Gilad Broner44445992015-09-29 16:05:39 +03003497 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003498
Gilad Broner44445992015-09-29 16:05:39 +03003499 if (counter)
3500 return;
3501
3502 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303503 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3504 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003505 return;
3506 }
3507
3508 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3509 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3510 msm_host->pm_qos_irq.latency);
3511}
3512
Gilad Broner68c54562015-09-20 11:59:46 +03003513static ssize_t
3514sdhci_msm_pm_qos_irq_show(struct device *dev,
3515 struct device_attribute *attr, char *buf)
3516{
3517 struct sdhci_host *host = dev_get_drvdata(dev);
3518 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3519 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3520 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3521
3522 return snprintf(buf, PAGE_SIZE,
3523 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3524 irq->enabled, atomic_read(&irq->counter), irq->latency);
3525}
3526
3527static ssize_t
3528sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3529 struct device_attribute *attr, char *buf)
3530{
3531 struct sdhci_host *host = dev_get_drvdata(dev);
3532 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3533 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3534
3535 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3536}
3537
3538static ssize_t
3539sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3540 struct device_attribute *attr, const char *buf, size_t count)
3541{
3542 struct sdhci_host *host = dev_get_drvdata(dev);
3543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3544 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3545 uint32_t value;
3546 bool enable;
3547 int ret;
3548
3549 ret = kstrtou32(buf, 0, &value);
3550 if (ret)
3551 goto out;
3552 enable = !!value;
3553
3554 if (enable == msm_host->pm_qos_irq.enabled)
3555 goto out;
3556
3557 msm_host->pm_qos_irq.enabled = enable;
3558 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303559 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003560 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3561 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3562 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3563 msm_host->pm_qos_irq.latency);
3564 }
3565
3566out:
3567 return count;
3568}
3569
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003570#ifdef CONFIG_SMP
3571static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3572 struct sdhci_host *host)
3573{
3574 msm_host->pm_qos_irq.req.irq = host->irq;
3575}
3576#else
3577static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3578 struct sdhci_host *host) { }
3579#endif
3580
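/*
 * sdhci_msm_pm_qos_irq_init - one-time setup of the IRQ PM QoS request:
 * affinity type/CPU from platform data, initial performance-mode latency,
 * the deferred unvote work, and the pm_qos_irq_enable/pm_qos_irq_status
 * sysfs attributes.
 */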
Gilad Broner44445992015-09-29 16:05:39 +03003581void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3582{
3583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3584 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3585 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003586 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003587
3588 if (!msm_host->pdata->pm_qos_data.irq_valid)
3589 return;
3590
3591 /* Initialize only once as this gets called per partition */
3592 if (msm_host->pm_qos_irq.enabled)
3593 return;
3594
3595 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3596 msm_host->pm_qos_irq.req.type =
3597 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003598 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3599 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3600 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003601 else
3602 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3603 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3604
Asutosh Das36c2e922015-12-01 12:19:58 +05303605 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003606 sdhci_msm_pm_qos_irq_unvote_work);
3607 /* For initialization phase, set the performance latency */
3608 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3609 msm_host->pm_qos_irq.latency =
3610 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3611 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3612 msm_host->pm_qos_irq.latency);
3613 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003614
3615 /* sysfs */
3616 msm_host->pm_qos_irq.enable_attr.show =
3617 sdhci_msm_pm_qos_irq_enable_show;
3618 msm_host->pm_qos_irq.enable_attr.store =
3619 sdhci_msm_pm_qos_irq_enable_store;
3620 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3621 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3622 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3623 ret = device_create_file(&msm_host->pdev->dev,
3624 &msm_host->pm_qos_irq.enable_attr);
3625 if (ret)
3626 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3627 __func__, ret);
3628
3629 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3630 msm_host->pm_qos_irq.status_attr.store = NULL;
3631 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3632 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3633 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3634 ret = device_create_file(&msm_host->pdev->dev,
3635 &msm_host->pm_qos_irq.status_attr);
3636 if (ret)
3637 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3638 __func__, ret);
3639}
3640
3641static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3642 struct device_attribute *attr, char *buf)
3643{
3644 struct sdhci_host *host = dev_get_drvdata(dev);
3645 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3646 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3647 struct sdhci_msm_pm_qos_group *group;
3648 int i;
3649 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3650 int offset = 0;
3651
3652 for (i = 0; i < nr_groups; i++) {
3653 group = &msm_host->pm_qos[i];
3654 offset += snprintf(&buf[offset], PAGE_SIZE,
3655 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3656 i, group->req.cpus_affine.bits[0],
3657 msm_host->pm_qos_group_enable,
3658 atomic_read(&group->counter),
3659 group->latency);
3660 }
3661
3662 return offset;
3663}
3664
3665static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3666 struct device_attribute *attr, char *buf)
3667{
3668 struct sdhci_host *host = dev_get_drvdata(dev);
3669 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3670 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3671
3672 return snprintf(buf, PAGE_SIZE, "%s\n",
3673 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3674}
3675
3676static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3677 struct device_attribute *attr, const char *buf, size_t count)
3678{
3679 struct sdhci_host *host = dev_get_drvdata(dev);
3680 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3681 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3682 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3683 uint32_t value;
3684 bool enable;
3685 int ret;
3686 int i;
3687
3688 ret = kstrtou32(buf, 0, &value);
3689 if (ret)
3690 goto out;
3691 enable = !!value;
3692
3693 if (enable == msm_host->pm_qos_group_enable)
3694 goto out;
3695
3696 msm_host->pm_qos_group_enable = enable;
3697 if (!enable) {
3698 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303699 cancel_delayed_work_sync(
3700 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003701 atomic_set(&msm_host->pm_qos[i].counter, 0);
3702 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3703 pm_qos_update_request(&msm_host->pm_qos[i].req,
3704 msm_host->pm_qos[i].latency);
3705 }
3706 }
3707
3708out:
3709 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003710}
3711
3712static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3713{
3714 int i;
3715 struct sdhci_msm_cpu_group_map *map =
3716 &msm_host->pdata->pm_qos_data.cpu_group_map;
3717
3718 if (cpu < 0)
3719 goto not_found;
3720
3721 for (i = 0; i < map->nr_groups; i++)
3722 if (cpumask_test_cpu(cpu, &map->mask[i]))
3723 return i;
3724
3725not_found:
3726 return -EINVAL;
3727}
3728
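/*
 * Per-CPU-group PM QoS: sdhci_msm_pm_qos_cpu_vote() maps the issuing CPU
 * to its group, bumps that group's counter and applies the latency for
 * the current power policy; sdhci_msm_pm_qos_cpu_unvote() drops the vote
 * (optionally via the delayed unvote work) once the counter reaches zero.
 */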
3729void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3730 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3731{
3732 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3733 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3734 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3735 struct sdhci_msm_pm_qos_group *pm_qos_group;
3736 int counter;
3737
3738 if (!msm_host->pm_qos_group_enable || group < 0)
3739 return;
3740
3741 pm_qos_group = &msm_host->pm_qos[group];
3742 counter = atomic_inc_return(&pm_qos_group->counter);
3743
3744 /* Make sure to update the voting in case power policy has changed */
3745 if (pm_qos_group->latency == latency->latency[host->power_policy]
3746 && counter > 1)
3747 return;
3748
Asutosh Das36c2e922015-12-01 12:19:58 +05303749 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003750
3751 pm_qos_group->latency = latency->latency[host->power_policy];
3752 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3753}
3754
3755static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3756{
3757 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303758 container_of(work, struct sdhci_msm_pm_qos_group,
3759 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003760
3761 if (atomic_read(&group->counter))
3762 return;
3763
3764 group->latency = PM_QOS_DEFAULT_VALUE;
3765 pm_qos_update_request(&group->req, group->latency);
3766}
3767
Gilad Broner07d92eb2015-09-29 16:57:21 +03003768bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003769{
3770 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3771 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3772 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3773
3774 if (!msm_host->pm_qos_group_enable || group < 0 ||
3775 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003776 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003777
3778 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303779 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3780 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003781 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003782 }
3783
3784 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3785 pm_qos_update_request(&msm_host->pm_qos[group].req,
3786 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003787 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003788}
3789
3790void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3791 struct sdhci_msm_pm_qos_latency *latency)
3792{
3793 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3794 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3795 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3796 struct sdhci_msm_pm_qos_group *group;
3797 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003798 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003799
3800 if (msm_host->pm_qos_group_enable)
3801 return;
3802
3803 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3804 GFP_KERNEL);
3805 if (!msm_host->pm_qos)
3806 return;
3807
3808 for (i = 0; i < nr_groups; i++) {
3809 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303810 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003811 sdhci_msm_pm_qos_cpu_unvote_work);
3812 atomic_set(&group->counter, 0);
3813 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3814 cpumask_copy(&group->req.cpus_affine,
3815 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3816 /* For initialization phase, set the performance mode latency */
3817 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3818 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3819 group->latency);
3820 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3821 __func__, i,
3822 group->req.cpus_affine.bits[0],
3823 group->latency,
3824 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3825 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003826 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003827 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003828
3829 /* sysfs */
3830 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3831 msm_host->pm_qos_group_status_attr.store = NULL;
3832 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3833 msm_host->pm_qos_group_status_attr.attr.name =
3834 "pm_qos_cpu_groups_status";
3835 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3836 ret = device_create_file(&msm_host->pdev->dev,
3837 &msm_host->pm_qos_group_status_attr);
3838 if (ret)
3839 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3840 __func__, ret);
3841 msm_host->pm_qos_group_enable_attr.show =
3842 sdhci_msm_pm_qos_group_enable_show;
3843 msm_host->pm_qos_group_enable_attr.store =
3844 sdhci_msm_pm_qos_group_enable_store;
3845 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3846 msm_host->pm_qos_group_enable_attr.attr.name =
3847 "pm_qos_cpu_groups_enable";
3848 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3849 ret = device_create_file(&msm_host->pdev->dev,
3850 &msm_host->pm_qos_group_enable_attr);
3851 if (ret)
3852 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3853 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003854}
3855
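/*
 * sdhci_msm_pre_req/post_req - wrap each request with the PM QoS votes:
 * pre_req votes for the IRQ latency and for the CPU group of the issuing
 * CPU (unvoting the previous group if the caller migrated); post_req
 * removes the IRQ vote and the CPU group vote synchronously.
 */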
Gilad Broner07d92eb2015-09-29 16:57:21 +03003856static void sdhci_msm_pre_req(struct sdhci_host *host,
3857 struct mmc_request *mmc_req)
3858{
3859 int cpu;
3860 int group;
3861 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3862 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3863 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3864 msm_host->pm_qos_prev_cpu);
3865
3866 sdhci_msm_pm_qos_irq_vote(host);
3867
3868 cpu = get_cpu();
3869 put_cpu();
3870 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3871 if (group < 0)
3872 return;
3873
3874 if (group != prev_group && prev_group >= 0) {
3875 sdhci_msm_pm_qos_cpu_unvote(host,
3876 msm_host->pm_qos_prev_cpu, false);
3877 prev_group = -1; /* make sure to vote for new group */
3878 }
3879
3880 if (prev_group < 0) {
3881 sdhci_msm_pm_qos_cpu_vote(host,
3882 msm_host->pdata->pm_qos_data.latency, cpu);
3883 msm_host->pm_qos_prev_cpu = cpu;
3884 }
3885}
3886
3887static void sdhci_msm_post_req(struct sdhci_host *host,
3888 struct mmc_request *mmc_req)
3889{
3890 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3891 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3892
3893 sdhci_msm_pm_qos_irq_unvote(host, false);
3894
3895 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3896 msm_host->pm_qos_prev_cpu = -1;
3897}
3898
3899static void sdhci_msm_init(struct sdhci_host *host)
3900{
3901 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3902 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3903
3904 sdhci_msm_pm_qos_irq_init(host);
3905
3906 if (msm_host->pdata->pm_qos_data.legacy_valid)
3907 sdhci_msm_pm_qos_cpu_init(host,
3908 msm_host->pdata->pm_qos_data.latency);
3909}
3910
Sahitya Tummala9150a942014-10-31 15:33:04 +05303911static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3912{
3913 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3914 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3915 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3916 u32 max_curr = 0;
3917
3918 if (curr_slot && curr_slot->vdd_data)
3919 max_curr = curr_slot->vdd_data->hpm_uA;
3920
3921 return max_curr;
3922}
3923
Asutosh Das0ef24812012-12-18 16:14:02 +05303924static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala14613432013-03-21 11:13:25 +05303925 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05303926 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003927 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05303928 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003929 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05303930 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303931 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303932 .get_min_clock = sdhci_msm_get_min_clock,
3933 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303934 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303935 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303936 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08003937 .set_bus_width = sdhci_set_bus_width,
Venkat Gopalakrishnan411df072015-01-09 11:09:44 -08003938 .reset = sdhci_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003939 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303940 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05303941 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03003942 .init = sdhci_msm_init,
3943 .pre_req = sdhci_msm_pre_req,
3944 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05303945 .get_current_limit = sdhci_msm_get_current_limit,
Asutosh Das0ef24812012-12-18 16:14:02 +05303946};
3947
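/*
 * sdhci_set_default_hw_caps - adjust the advertised capabilities and set
 * per-version quirks based on the core major/minor revision and platform
 * data: 3.0V/1.8V/8-bit support, the one MID mode reset workaround,
 * CDCLP533 vs CM DLL usage, the updated DLL reset sequence, enhanced
 * strobe, the 14lpp DLL, the RCLK delay fix and ICE HCI support. The
 * result is written to the vendor spec capabilities register and cached
 * in msm_host->caps_0.
 */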
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303948static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
3949 struct sdhci_host *host)
3950{
Krishna Konda46fd1432014-10-30 21:13:27 -07003951 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303952 u16 minor;
3953 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303954 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303955 const struct sdhci_msm_offset *msm_host_offset =
3956 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303957
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303958 version = sdhci_msm_readl_relaxed(host,
3959 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303960 major = (version & CORE_VERSION_MAJOR_MASK) >>
3961 CORE_VERSION_MAJOR_SHIFT;
3962 minor = version & CORE_VERSION_TARGET_MASK;
3963
Krishna Konda46fd1432014-10-30 21:13:27 -07003964 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
3965
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303966 /*
3967 * Starting with the SDCC 5 controller (core major version = 1),
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003968 * the controller won't advertise 3.0V, 1.8V and 8-bit features
 3969 * except on some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303970 */
3971 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003972 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003973 /*
3974 * Enable 1.8V support capability on controllers that
3975 * support dual voltage
3976 */
3977 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07003978 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
3979 caps |= CORE_3_0V_SUPPORT;
3980 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003981 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05303982 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
3983 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303984 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003985
3986 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303987 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
3988 * on 8992 (minor 0x3e) as a workaround for the data stuck issue during reset.
3989 */
3990 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303991 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303992 val = readl_relaxed(host->ioaddr +
3993 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303994 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303995 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303996 }
3997 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003998 * SDCC 5 controller with major version 1, minor version 0x34 and later
3999 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
4000 */
4001 if ((major == 1) && (minor < 0x34))
4002 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004003
4004 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004005 * SDCC 5 controller with major version 1, minor version 0x42 and later
4006 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304007 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004008 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304009 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004010 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304011 msm_host->enhanced_strobe = true;
4012 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004013
4014 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004015 * SDCC 5 controller with major version 1 and minor version 0x42,
4016 * 0x46 and 0x49 currently uses the 14lpp tech DLL, whose internal
4017 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304018 * when MCLK is gated OFF, it is not gated for less than 0.5us,
 4019 * and MCLK must be switched on for at least 1us before DATA
 4020 * starts coming.
4021 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004022 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
4023 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304024 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004025
Pavan Anamula5a256df2015-10-16 14:38:28 +05304026 /* Fake 3.0V support for SDIO devices which require such a voltage */
4027 if (msm_host->pdata->core_3_0v_support) {
4028 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304029 writel_relaxed((readl_relaxed(host->ioaddr +
4030 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4031 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304032 }
4033
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004034 if ((major == 1) && (minor >= 0x49))
4035 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304036 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004037 * Mask 64-bit support for controllers with a 32-bit address bus so that
 4038 * a smaller descriptor size will be used, improving memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004039 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004040 if (!msm_host->pdata->largeaddressbus)
4041 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4042
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304043 writel_relaxed(caps, host->ioaddr +
4044 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004045 /* keep track of the value in SDHCI_CAPABILITIES */
4046 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304047
4048 if ((major == 1) && (minor >= 0x6b))
4049 msm_host->ice_hci_support = true;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304050}
4051
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004052#ifdef CONFIG_MMC_CQ_HCI
4053static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4054 struct platform_device *pdev)
4055{
4056 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4057 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4058
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304059 if (nocmdq) {
4060 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4061 return;
4062 }
4063
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004064 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004065 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004066 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4067 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004068 host->cq_host = NULL;
4069 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004070 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004071 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004072}
4073#else
4074static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4075 struct platform_device *pdev)
4076{
4077
4078}
4079#endif
4080
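/*
 * sdhci_msm_is_bootdevice - return true if this controller is named in
 * the "androidboot.bootdevice=" kernel command line argument, or if that
 * argument is absent altogether (in which case the boot device is
 * unknown and the probe proceeds).
 */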
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004081static bool sdhci_msm_is_bootdevice(struct device *dev)
4082{
4083 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4084 strlen(saved_command_line))) {
4085 char search_string[50];
4086
4087 snprintf(search_string, ARRAY_SIZE(search_string),
4088 "androidboot.bootdevice=%s", dev_name(dev));
4089 if (strnstr(saved_command_line, search_string,
4090 strlen(saved_command_line)))
4091 return true;
4092 else
4093 return false;
4094 }
4095
4096 /*
4097 * "androidboot.bootdevice=" argument is not present then
4098 * return true as we don't know the boot device anyways.
4099 */
4100 return true;
4101}
4102
Asutosh Das0ef24812012-12-18 16:14:02 +05304103static int sdhci_msm_probe(struct platform_device *pdev)
4104{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304105 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304106 struct sdhci_host *host;
4107 struct sdhci_pltfm_host *pltfm_host;
4108 struct sdhci_msm_host *msm_host;
4109 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004110 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004111 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004112 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304113 struct resource *tlmm_memres = NULL;
4114 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304115 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304116
4117 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4118 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4119 GFP_KERNEL);
4120 if (!msm_host) {
4121 ret = -ENOMEM;
4122 goto out;
4123 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304124
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304125 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4126 msm_host->mci_removed = true;
4127 msm_host->offset = &sdhci_msm_offset_mci_removed;
4128 } else {
4129 msm_host->mci_removed = false;
4130 msm_host->offset = &sdhci_msm_offset_mci_present;
4131 }
4132 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304133 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4134 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4135 if (IS_ERR(host)) {
4136 ret = PTR_ERR(host);
4137 goto out;
4138 }
4139
4140 pltfm_host = sdhci_priv(host);
4141 pltfm_host->priv = msm_host;
4142 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304143 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304144
4145 /* Extract platform data */
4146 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004147 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304148 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004149 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4150 ret);
4151 goto pltfm_free;
4152 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004153
4154 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004155 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4156 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004157 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004158 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004159
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004160 if (disable_slots & (1 << (ret - 1))) {
4161 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4162 ret);
4163 ret = -ENODEV;
4164 goto pltfm_free;
4165 }
4166
Sayali Lokhande5f768322016-04-11 18:36:53 +05304167 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004168 sdhci_slot[ret-1] = msm_host;
4169
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004170 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4171 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304172 if (!msm_host->pdata) {
4173 			dev_err(&pdev->dev, "DT parsing error\n");
			ret = -EINVAL;
4174 			goto pltfm_free;
4175 }
4176 } else {
4177 		dev_err(&pdev->dev, "No device tree node\n");
		ret = -EINVAL;
4178 		goto pltfm_free;
4179 }
4180
4181 /* Setup Clocks */
4182
4183 /* Setup SDCC bus voter clock. */
4184 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4185 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4186 /* Vote for max. clk rate for max. performance */
4187 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4188 if (ret)
4189 goto pltfm_free;
4190 ret = clk_prepare_enable(msm_host->bus_clk);
4191 if (ret)
4192 goto pltfm_free;
4193 }
4194
4195 /* Setup main peripheral bus clock */
4196 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4197 if (!IS_ERR(msm_host->pclk)) {
4198 ret = clk_prepare_enable(msm_host->pclk);
4199 if (ret)
4200 goto bus_clk_disable;
4201 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304202 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304203
4204 /* Setup SDC MMC clock */
4205 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4206 if (IS_ERR(msm_host->clk)) {
4207 ret = PTR_ERR(msm_host->clk);
4208 goto pclk_disable;
4209 }
4210
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304211 /* Set to the minimum supported clock frequency */
4212 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4213 if (ret) {
4214 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304215 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304216 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304217 ret = clk_prepare_enable(msm_host->clk);
4218 if (ret)
4219 goto pclk_disable;
4220
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304221 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304222 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304223
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004224 /* Setup CDC calibration fixed feedback clock */
4225 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4226 if (!IS_ERR(msm_host->ff_clk)) {
4227 ret = clk_prepare_enable(msm_host->ff_clk);
4228 if (ret)
4229 goto clk_disable;
4230 }
4231
4232 /* Setup CDC calibration sleep clock */
4233 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4234 if (!IS_ERR(msm_host->sleep_clk)) {
4235 ret = clk_prepare_enable(msm_host->sleep_clk);
4236 if (ret)
4237 goto ff_clk_disable;
4238 }
4239
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004240 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4241
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304242 ret = sdhci_msm_bus_register(msm_host, pdev);
4243 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004244 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304245
4246 if (msm_host->msm_bus_vote.client_handle)
4247 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4248 sdhci_msm_bus_work);
4249 sdhci_msm_bus_voting(host, 1);
4250
Asutosh Das0ef24812012-12-18 16:14:02 +05304251 /* Setup regulators */
4252 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4253 if (ret) {
4254 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304255 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304256 }
4257
4258 /* Reset the core and Enable SDHC mode */
4259 core_memres = platform_get_resource_byname(pdev,
4260 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304261 if (!msm_host->mci_removed) {
4262 if (!core_memres) {
4263 			dev_err(&pdev->dev, "Failed to get iomem resource\n");
			ret = -EINVAL;
4264 			goto vreg_deinit;
4265 }
4266 msm_host->core_mem = devm_ioremap(&pdev->dev,
4267 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304268
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304269 if (!msm_host->core_mem) {
4270 dev_err(&pdev->dev, "Failed to remap registers\n");
4271 ret = -ENOMEM;
4272 goto vreg_deinit;
4273 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304274 }
4275
Sahitya Tummala079ed852015-10-29 20:18:45 +05304276 tlmm_memres = platform_get_resource_byname(pdev,
4277 IORESOURCE_MEM, "tlmm_mem");
4278 if (tlmm_memres) {
4279 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4280 resource_size(tlmm_memres));
4281
4282 if (!tlmm_mem) {
4283 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4284 ret = -ENOMEM;
4285 goto vreg_deinit;
4286 }
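		/*
		 * Set bit 1 in the TLMM register for this SDC instance. The
		 * exact function of this bit is SoC-specific (assumed here to
		 * be a pad configuration required before card enumeration).
		 */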
4287 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4288 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4289 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4290 }
4291
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304292 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004293 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304294 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004295 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304296 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304297
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304298 if (!msm_host->mci_removed) {
4299 /* Set HC_MODE_EN bit in HC_MODE register */
4300 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304301
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304302 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4303 writel_relaxed(readl_relaxed(msm_host->core_mem +
4304 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4305 msm_host->core_mem + CORE_HC_MODE);
4306 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304307 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004308
4309 /*
4310 	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4311 * be used as required later on.
4312 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304313 writel_relaxed((readl_relaxed(host->ioaddr +
4314 msm_host_offset->CORE_VENDOR_SPEC) |
4315 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4316 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304317 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304318 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4319 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4320 * interrupt in GIC (by registering the interrupt handler), we need to
4321 	 * ensure that any pending power irq interrupt status is acknowledged;
4322 	 * otherwise the power irq interrupt handler would fire prematurely.
4323 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304324 irq_status = sdhci_msm_readl_relaxed(host,
4325 msm_host_offset->CORE_PWRCTL_STATUS);
4326 sdhci_msm_writel_relaxed(irq_status, host,
4327 msm_host_offset->CORE_PWRCTL_CLEAR);
4328 irq_ctl = sdhci_msm_readl_relaxed(host,
4329 msm_host_offset->CORE_PWRCTL_CTL);
4330
Subhash Jadavani28137342013-05-14 17:46:43 +05304331 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4332 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4333 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4334 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304335 sdhci_msm_writel_relaxed(irq_ctl, host,
4336 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004337
Subhash Jadavani28137342013-05-14 17:46:43 +05304338 /*
4339 	 * Ensure that the above writes are propagated before interrupt enablement
4340 * in GIC.
4341 */
4342 mb();
4343
4344 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304345	 * The following are deviations from the SDHC spec v3.0:
4346 * 1. Card detection is handled using separate GPIO.
4347 * 2. Bus power control is handled by interacting with PMIC.
4348 */
4349 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4350 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304351 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004352 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304353 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304354 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304355 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304356 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304357 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304358 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304359
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304360 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4361 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4362
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004363 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004364 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4365 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4366 SDHCI_VENDOR_VER_SHIFT));
4367 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4368 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4369 /*
4370 		 * Add a 40us delay in the interrupt handler when
4371 		 * operating at the initialization frequency (400 kHz).
4372 */
4373 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4374 /*
4375 * Set Software Reset for DAT line in Software
4376 * Reset Register (Bit 2).
4377 */
4378 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4379 }
4380
Asutosh Das214b9662013-06-13 14:27:42 +05304381 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4382
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004383 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004384 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4385 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304386 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004387 msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
Asutosh Das0ef24812012-12-18 16:14:02 +05304388 		goto vreg_deinit;
4389 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004390 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304391 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004392 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304393 if (ret) {
4394 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004395 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304396 goto vreg_deinit;
4397 }
4398
4399 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304400 sdhci_msm_writel_relaxed(INT_MASK, host,
4401 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304402
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304403#ifdef CONFIG_MMC_CLKGATE
4404 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4405 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4406#endif
4407
Asutosh Das0ef24812012-12-18 16:14:02 +05304408 /* Set host capabilities */
4409 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4410 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004411 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304412 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304413 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004414 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004415 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004416 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304417 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004418 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004419 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304420 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304421
4422 if (msm_host->pdata->nonremovable)
4423 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4424
Guoping Yuf7c91332014-08-20 16:56:18 +08004425 if (msm_host->pdata->nonhotplug)
4426 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4427
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004428 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4429
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304430 init_completion(&msm_host->pwr_irq_completion);
4431
Sahitya Tummala581df132013-03-12 14:57:46 +05304432 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304433 /*
4434 * Set up the card detect GPIO in active configuration before
4435 * configuring it as an IRQ. Otherwise, it can be in some
4436 	 * weird/inconsistent state, resulting in a flood of interrupts.
4437 */
4438 sdhci_msm_setup_pins(msm_host->pdata, true);
4439
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304440 /*
4441 * This delay is needed for stabilizing the card detect GPIO
4442 * line after changing the pull configs.
4443 */
4444 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304445 ret = mmc_gpio_request_cd(msm_host->mmc,
4446 msm_host->pdata->status_gpio, 0);
4447 if (ret) {
4448 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4449 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304450 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304451 }
4452 }
4453
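	/*
	 * Use a 64-bit DMA mask when the controller advertises 64-bit system
	 * bus support and the platform can handle it; otherwise fall back to
	 * a 32-bit mask.
	 */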
Krishna Konda7feab352013-09-17 23:55:40 -07004454 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4455 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4456 host->dma_mask = DMA_BIT_MASK(64);
4457 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304458 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004459 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304460 host->dma_mask = DMA_BIT_MASK(32);
4461 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304462 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304463 } else {
4464 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4465 }
4466
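	/*
	 * An optional wakeup interrupt (routed from the card's DAT1 line)
	 * allows an SDIO card to wake the host while the main SDHC interrupt
	 * is disabled across suspend.
	 */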
Ritesh Harjani42876f42015-11-17 17:46:51 +05304467 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4468 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304469 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304470 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4471 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304472 msm_host->is_sdiowakeup_enabled = true;
4473 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4474 sdhci_msm_sdiowakeup_irq,
4475 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4476 "sdhci-msm sdiowakeup", host);
4477 if (ret) {
4478 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4479 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4480 msm_host->pdata->sdiowakeup_irq = -1;
4481 msm_host->is_sdiowakeup_enabled = false;
4482 goto vreg_deinit;
4483 } else {
4484 spin_lock_irqsave(&host->lock, flags);
4485 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304486 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304487 spin_unlock_irqrestore(&host->lock, flags);
4488 }
4489 }
4490
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004491 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304492 ret = sdhci_add_host(host);
4493 if (ret) {
4494 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304495 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304496 }
4497
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004498 pm_runtime_set_active(&pdev->dev);
4499 pm_runtime_enable(&pdev->dev);
4500 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4501 pm_runtime_use_autosuspend(&pdev->dev);
4502
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304503 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4504 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4505 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4506 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4507 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4508 ret = device_create_file(&pdev->dev,
4509 &msm_host->msm_bus_vote.max_bus_bw);
4510 if (ret)
4511 goto remove_host;
4512
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304513 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4514 msm_host->polling.show = show_polling;
4515 msm_host->polling.store = store_polling;
4516 sysfs_attr_init(&msm_host->polling.attr);
4517 msm_host->polling.attr.name = "polling";
4518 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4519 ret = device_create_file(&pdev->dev, &msm_host->polling);
4520 if (ret)
4521 goto remove_max_bus_bw_file;
4522 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304523
4524 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4525 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4526 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4527 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4528 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4529 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4530 if (ret) {
4531 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4532 mmc_hostname(host->mmc), __func__, ret);
4533 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4534 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304535 /* Successful initialization */
4536 goto out;
4537
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304538remove_max_bus_bw_file:
4539 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304540remove_host:
4541 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004542 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304543 sdhci_remove_host(host, dead);
4544vreg_deinit:
4545 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304546bus_unregister:
4547 if (msm_host->msm_bus_vote.client_handle)
4548 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4549 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004550sleep_clk_disable:
4551 if (!IS_ERR(msm_host->sleep_clk))
4552 clk_disable_unprepare(msm_host->sleep_clk);
4553ff_clk_disable:
4554 if (!IS_ERR(msm_host->ff_clk))
4555 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304556clk_disable:
4557 if (!IS_ERR(msm_host->clk))
4558 clk_disable_unprepare(msm_host->clk);
4559pclk_disable:
4560 if (!IS_ERR(msm_host->pclk))
4561 clk_disable_unprepare(msm_host->pclk);
4562bus_clk_disable:
4563 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4564 clk_disable_unprepare(msm_host->bus_clk);
4565pltfm_free:
4566 sdhci_pltfm_free(pdev);
4567out:
4568 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4569 return ret;
4570}
4571
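/*
 * Remove: tear down the sysfs attributes, runtime PM, the SDHCI host,
 * regulators, pin configuration and the bus-scaling client.
 */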
4572static int sdhci_msm_remove(struct platform_device *pdev)
4573{
4574 struct sdhci_host *host = platform_get_drvdata(pdev);
4575 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4576 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4577 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4578 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4579 0xffffffff);
4580
4581 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304582 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4583 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304584 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004585 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304586 sdhci_remove_host(host, dead);
4587 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304588
Asutosh Das0ef24812012-12-18 16:14:02 +05304589 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304590
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304591 sdhci_msm_setup_pins(pdata, true);
4592 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304593
4594 if (msm_host->msm_bus_vote.client_handle) {
4595 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4596 sdhci_msm_bus_unregister(msm_host);
4597 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304598 return 0;
4599}
4600
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004601#ifdef CONFIG_PM
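/*
 * Arm or disarm the SDIO wakeup GPIO interrupt around system suspend.
 * Returns 1 when the wakeup interrupt is not applicable (no SDIO card,
 * no valid wakeup GPIO, or the card did not request SDIO IRQ wakeup),
 * 0 on success, or a negative error from enable/disable_irq_wake().
 */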
Ritesh Harjani42876f42015-11-17 17:46:51 +05304602static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4603{
4604 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4605 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4606 unsigned long flags;
4607 int ret = 0;
4608
4609 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4610 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4611 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304612 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304613 return 1;
4614 }
4615
4616 spin_lock_irqsave(&host->lock, flags);
4617 if (enable) {
4618 /* configure DAT1 gpio if applicable */
4619 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304620 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304621 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4622 if (!ret)
4623 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4624 goto out;
4625 } else {
4626 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4627 mmc_hostname(host->mmc), enable);
4628 }
4629 } else {
4630 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4631 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4632 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304633 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304634 } else {
4635 			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4636 mmc_hostname(host->mmc), enable);
4637
4638 }
4639 }
4640out:
4641 if (ret)
4642 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4643 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4644 ret, msm_host->pdata->sdiowakeup_irq);
4645 spin_unlock_irqrestore(&host->lock, flags);
4646 return ret;
4647}
4648
4649
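/*
 * Runtime suspend: for SDIO cards the SDHC interrupt is left enabled so
 * card interrupts can still be serviced; otherwise it is disabled. The
 * power IRQ is always disabled, and the bus vote is dropped if the clocks
 * are already off.
 */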
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004650static int sdhci_msm_runtime_suspend(struct device *dev)
4651{
4652 struct sdhci_host *host = dev_get_drvdata(dev);
4653 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4654 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004655 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004656
Ritesh Harjani42876f42015-11-17 17:46:51 +05304657 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4658 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304659
Ritesh Harjani42876f42015-11-17 17:46:51 +05304660 sdhci_cfg_irq(host, false, true);
4661
4662defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004663 disable_irq(msm_host->pwr_irq);
4664
4665 /*
4666 	 * Remove the bus vote immediately, but only if the clocks are off;
4667 	 * in that case work to remove the vote may have been queued and may
4668 	 * not complete before runtime suspend or system suspend.
4669 */
4670 if (!atomic_read(&msm_host->clks_on)) {
4671 if (msm_host->msm_bus_vote.client_handle)
4672 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4673 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004674 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4675 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004676
4677 return 0;
4678}
4679
4680static int sdhci_msm_runtime_resume(struct device *dev)
4681{
4682 struct sdhci_host *host = dev_get_drvdata(dev);
4683 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4684 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004685 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004686
Ritesh Harjani42876f42015-11-17 17:46:51 +05304687 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4688 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304689
Ritesh Harjani42876f42015-11-17 17:46:51 +05304690 sdhci_cfg_irq(host, true, true);
4691
4692defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004693 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004694
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004695 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4696 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004697 return 0;
4698}
4699
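/*
 * System suspend: disable the card-detect IRQ, perform a runtime suspend
 * if the device is not already runtime suspended, gate the controller
 * clock, and arm the SDIO wakeup path when an SDIO card is present.
 */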
4700static int sdhci_msm_suspend(struct device *dev)
4701{
4702 struct sdhci_host *host = dev_get_drvdata(dev);
4703 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4704 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004705 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304706 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004707 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004708
4709 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4710 (msm_host->mmc->slot.cd_irq >= 0))
4711 disable_irq(msm_host->mmc->slot.cd_irq);
4712
4713 if (pm_runtime_suspended(dev)) {
4714 pr_debug("%s: %s: already runtime suspended\n",
4715 mmc_hostname(host->mmc), __func__);
4716 goto out;
4717 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004718 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004719out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304720 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304721 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4722 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4723 if (sdio_cfg)
4724 sdhci_cfg_irq(host, false, true);
4725 }
4726
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004727 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4728 ktime_to_us(ktime_sub(ktime_get(), start)));
4729 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004730}
4731
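/*
 * System resume: re-enable the card-detect IRQ, perform a runtime resume
 * unless the device is still runtime suspended, and disarm the SDIO
 * wakeup path.
 */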
4732static int sdhci_msm_resume(struct device *dev)
4733{
4734 struct sdhci_host *host = dev_get_drvdata(dev);
4735 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4736 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4737 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304738 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004739 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004740
4741 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4742 (msm_host->mmc->slot.cd_irq >= 0))
4743 enable_irq(msm_host->mmc->slot.cd_irq);
4744
4745 if (pm_runtime_suspended(dev)) {
4746 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4747 mmc_hostname(host->mmc), __func__);
4748 goto out;
4749 }
4750
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004751 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004752out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304753 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4754 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4755 if (sdio_cfg)
4756 sdhci_cfg_irq(host, true, true);
4757 }
4758
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004759 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4760 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004761 return ret;
4762}
4763
Ritesh Harjani42876f42015-11-17 17:46:51 +05304764static int sdhci_msm_suspend_noirq(struct device *dev)
4765{
4766 struct sdhci_host *host = dev_get_drvdata(dev);
4767 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4768 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4769 int ret = 0;
4770
4771 /*
4772 	 * The SDIO IRQ thread (ksdioirqd) may still be running, so retry
4773 	 * the suspend if the clocks are still on.
4774 */
4775 if (atomic_read(&msm_host->clks_on)) {
4776 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4777 mmc_hostname(host->mmc), __func__);
4778 ret = -EAGAIN;
4779 }
4780
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304781 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4782 if (msm_host->sdio_pending_processing)
4783 ret = -EBUSY;
4784
Ritesh Harjani42876f42015-11-17 17:46:51 +05304785 return ret;
4786}
4787
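/*
 * System sleep uses sdhci_msm_suspend/resume, runtime PM uses the runtime
 * callbacks above, and suspend_noirq aborts the suspend if the clocks are
 * still on or SDIO wakeup processing is pending.
 */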
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004788static const struct dev_pm_ops sdhci_msm_pmops = {
4789 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4790 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4791 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05304792 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004793};
4794
4795#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4796
4797#else
4798#define SDHCI_MSM_PMOPS NULL
4799#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304800static const struct of_device_id sdhci_msm_dt_match[] = {
4801 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304802 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004803 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304804};
4805MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4806
4807static struct platform_driver sdhci_msm_driver = {
4808 .probe = sdhci_msm_probe,
4809 .remove = sdhci_msm_remove,
4810 .driver = {
4811 .name = "sdhci_msm",
4812 .owner = THIS_MODULE,
4813 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004814 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05304815 },
4816};
4817
4818module_platform_driver(sdhci_msm_driver);
4819
4820MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
4821MODULE_LICENSE("GPL v2");