/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS 10
#define CORE_POWER 0x0
#define CORE_SW_RST (1 << 7)

#define SDHCI_VER_100 0x2B

#define CORE_VERSION_STEP_MASK 0x0000FFFF
#define CORE_VERSION_MINOR_MASK 0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT 16
#define CORE_VERSION_MAJOR_MASK 0xF0000000
#define CORE_VERSION_MAJOR_SHIFT 28
#define CORE_VERSION_TARGET_MASK 0x000000FF
#define SDHCI_MSM_VER_420 0x49

#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
#define FF_CLK_SW_RST_DIS (1 << 13)

#define CORE_PWRCTL_BUS_OFF 0x01
#define CORE_PWRCTL_BUS_ON (1 << 1)
#define CORE_PWRCTL_IO_LOW (1 << 2)
#define CORE_PWRCTL_IO_HIGH (1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS 0x01
#define CORE_PWRCTL_BUS_FAIL (1 << 1)
#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
#define CORE_PWRCTL_IO_FAIL (1 << 3)

#define INT_MASK 0xF
#define MAX_PHASES 16

#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
#define CORE_DLL_EN (1 << 16)
#define CORE_CDR_EN (1 << 17)
#define CORE_CK_OUT_EN (1 << 18)
#define CORE_CDR_EXT_EN (1 << 19)
#define CORE_DLL_PDN (1 << 29)
#define CORE_DLL_RST (1 << 30)

#define CORE_DLL_LOCK (1 << 7)
#define CORE_DDR_DLL_LOCK (1 << 11)

#define CORE_CLK_PWRSAVE (1 << 1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
#define CORE_HC_AUTO_CMD21_EN (1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
#define CORE_HC_SELECT_IN_EN (1 << 18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL 0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
#define HC_SW_RST_REQ (1 << 21)
#define CORE_ONE_MID_EN (1 << 25)

#define CORE_8_BIT_SUPPORT (1 << 18)
#define CORE_3_3V_SUPPORT (1 << 24)
#define CORE_3_0V_SUPPORT (1 << 25)
#define CORE_1_8V_SUPPORT (1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
#define CORE_HW_AUTOCAL_ENA (1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1 0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
#define CORE_TIMER_ENA (1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
#define CORE_CDC_OFFSET_CFG 0x14C
#define CORE_CSR_CDC_DELAY_CFG 0x150
#define CORE_CDC_SLAVE_DDA_CFG 0x160
#define CORE_CSR_CDC_STATUS0 0x164
#define CORE_CALIBRATION_DONE (1 << 0)

#define CORE_CDC_ERROR_CODE_MASK 0x7000000

#define CQ_CMD_DBG_RAM 0x110
#define CQ_CMD_DBG_RAM_WA 0x150
#define CQ_CMD_DBG_RAM_OL 0x154

#define CORE_CSR_CDC_GEN_CFG 0x178
#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
#define CORE_CDC_SWITCH_RC_EN (1 << 1)

#define CORE_CDC_T4_DLY_SEL (1 << 0)
#define CORE_CMDIN_RCLK_EN (1 << 1)
#define CORE_START_CDC_TRAFFIC (1 << 6)

#define CORE_PWRSAVE_DLL (1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)

#define CORE_DDR_CAL_EN (1 << 0)
#define CORE_FLL_CYCLE_CNT (1 << 18)
#define CORE_DLL_CLOCK_DISABLE (1 << 21)

#define DDR_CONFIG_POR_VAL 0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY 115
#define DDR_CONFIG_2_POR_VAL 0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */

#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
#define TCXO_FREQ 19200000

#define INVALID_TUNING_PHASE -1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES 16
#define MAX_DRV_TYPES_SUPPORTED_HS200 4
#define MSM_AUTOSUSPEND_DELAY_MS 100

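/*
 * Per-variant vendor register offsets. Controllers that have had the legacy
 * MCI register space removed expose these registers relative to the SDHC
 * base (host->ioaddr); older controllers expose them in the separate
 * core_mem region.
 */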
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

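/*
 * Register accessors that hide the MCI/no-MCI layout difference: reads and
 * writes go through host->ioaddr when the MCI region has been removed and
 * through msm_host->core_mem otherwise.
 */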
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

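/*
 * Tuning block patterns that the card is expected to return during
 * CMD19/CMD21 tuning; the 128-byte pattern is used for an 8-bit bus
 * (HS200), the 64-byte pattern otherwise.
 */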
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set to whatever is passed in voltage_level (the third
	 * argument of sdhci_msm_set_vdd_io_vol()).
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

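/*
 * store/show handlers for the en_auto_cmd21 device attribute: a runtime
 * toggle for issuing auto CMD21 during HS200 tuning.
 */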
static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find the longest range of consecutive selected DLL clock output
 * phases that can be used as the sampling setting for SD3.0 UHS-I
 * card read operations (in SDR104 timing mode) or for eMMC4.5 card
 * read operations (in HS400/HS200 timing mode).
 * Select the phase at 3/4 of that range and configure the DLL with
 * the selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between two valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If two valid windows form a cycle, merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases across both windows must be less
			 * than MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge the two cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

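/*
 * CDCLP533 hardware calibration used for HS400: program the CDC registers
 * with the initialization values below, trigger the calibration and poll
 * for completion.
 */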
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

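/*
 * CM_DLL_SDC4 calibration used for HS400 on cores without CDCLP533:
 * program the DDR CONFIG registers, set DDR_CAL_EN and poll for
 * DDR_DLL_LOCK.
 */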
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogram the value in case it has been modified by the
	 * bootloader.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated off, it must not be gated for less than 0.5us,
	 * and MCLK must be switched on for at least 1us before DATA starts
	 * coming in. Controllers with the 14lpp tech DLL cannot guarantee
	 * this requirement, so PWRSAVE_DLL must not be turned on for host
	 * controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
				mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

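/*
 * Switch the card's drive strength via CMD6: the EXT_CSD HS_TIMING byte is
 * written with the drive type in the upper nibble and timing value 0x2
 * (HS200) in the lower nibble.
 */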
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
			mmc_hostname(mmc), __func__,
			drv_type);
}

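/*
 * Platform tuning: sweep all 16 DLL output phases with CMD19/CMD21, record
 * the phases for which the returned tuning pattern is intact, and program
 * the DLL with the most appropriate phase from the longest passing window.
 * In HS400 mode only the DLL calibration is re-run.
 */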
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed on commands
	 * that are sent during the tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * Wait for at least 146 MCLK cycles
					 * for the card to move to the TRANS
					 * state. As MCLK would be at least
					 * 200MHz during tuning, we need at
					 * most a 0.73us delay. To be on the
					 * safer side, a 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			}
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then it's a problem. So change the card's
		 * drive type to a different value, if supported, and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to another value; the default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

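/* Request (enable) or free (disable) all slot GPIOs listed in platform data */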
static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_gpio_data *curr;
	int i, ret = 0;

	curr = pdata->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			ret = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n", __func__,
					curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			ret = gpio_request(curr->gpio[i].no,
					curr->gpio[i].name);
			if (ret) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					__func__, curr->gpio[i].no,
					curr->gpio[i].name, ret);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	return ret;

free_gpios:
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
	return ret;
}

Pratibhasagar V9acf2642013-11-21 21:07:21 +05301382static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1383 bool enable)
1384{
1385 int ret = 0;
1386
1387 if (enable)
1388 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1389 pdata->pctrl_data->pins_active);
1390 else
1391 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1392 pdata->pctrl_data->pins_sleep);
1393
1394 if (ret < 0)
1395 pr_err("%s state for pinctrl failed with %d\n",
1396 enable ? "Enabling" : "Disabling", ret);
1397
1398 return ret;
1399}
1400
Asutosh Das0ef24812012-12-18 16:14:02 +05301401static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1402{
1403 int ret = 0;
1404
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301405 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301406 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301407 } else if (pdata->pctrl_data) {
1408 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1409 goto out;
1410 } else if (!pdata->pin_data) {
1411 return 0;
1412 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301413
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301414 if (pdata->pin_data->is_gpio)
1415 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301416out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301417 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301418 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301419
1420 return ret;
1421}
1422
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301423static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1424 u32 **out, int *len, u32 size)
1425{
1426 int ret = 0;
1427 struct device_node *np = dev->of_node;
1428 size_t sz;
1429 u32 *arr = NULL;
1430
1431 if (!of_get_property(np, prop_name, len)) {
1432 ret = -EINVAL;
1433 goto out;
1434 }
1435 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001436 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301437 dev_err(dev, "%s invalid size\n", prop_name);
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1443 if (!arr) {
1444 dev_err(dev, "%s failed allocating memory\n", prop_name);
1445 ret = -ENOMEM;
1446 goto out;
1447 }
1448
1449 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1450 if (ret < 0) {
1451 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1452 goto out;
1453 }
1454 *out = arr;
1455out:
1456 if (ret)
1457 *len = 0;
1458 return ret;
1459}
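/*
 * Illustrative use of the helper above (values are placeholders, not a
 * specific board's device tree): the supported clock table is pulled
 * from a property such as
 *
 *     qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 *
 * and ends up in pdata->sup_clk_table / pdata->sup_clk_cnt.
 */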
1460
Asutosh Das0ef24812012-12-18 16:14:02 +05301461#define MAX_PROP_SIZE 32
1462static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1463 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1464{
1465 int len, ret = 0;
1466 const __be32 *prop;
1467 char prop_name[MAX_PROP_SIZE];
1468 struct sdhci_msm_reg_data *vreg;
1469 struct device_node *np = dev->of_node;
1470
1471 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1472 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301473 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301474 return ret;
1475 }
1476
1477 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1478 if (!vreg) {
1479 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1480 ret = -ENOMEM;
1481 return ret;
1482 }
1483
1484 vreg->name = vreg_name;
1485
1486 snprintf(prop_name, MAX_PROP_SIZE,
1487 "qcom,%s-always-on", vreg_name);
1488 if (of_get_property(np, prop_name, NULL))
1489 vreg->is_always_on = true;
1490
1491 snprintf(prop_name, MAX_PROP_SIZE,
1492 "qcom,%s-lpm-sup", vreg_name);
1493 if (of_get_property(np, prop_name, NULL))
1494 vreg->lpm_sup = true;
1495
1496 snprintf(prop_name, MAX_PROP_SIZE,
1497 "qcom,%s-voltage-level", vreg_name);
1498 prop = of_get_property(np, prop_name, &len);
1499 if (!prop || (len != (2 * sizeof(__be32)))) {
1500 dev_warn(dev, "%s %s property\n",
1501 prop ? "invalid format" : "no", prop_name);
1502 } else {
1503 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1504 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1505 }
1506
1507 snprintf(prop_name, MAX_PROP_SIZE,
1508 "qcom,%s-current-level", vreg_name);
1509 prop = of_get_property(np, prop_name, &len);
1510 if (!prop || (len != (2 * sizeof(__be32)))) {
1511 dev_warn(dev, "%s %s property\n",
1512 prop ? "invalid format" : "no", prop_name);
1513 } else {
1514 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1515 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1516 }
1517
1518 *vreg_data = vreg;
1519 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1520 vreg->name, vreg->is_always_on ? "always_on," : "",
1521 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1522 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1523
1524 return ret;
1525}
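/*
 * Illustrative bindings consumed by sdhci_msm_dt_parse_vreg_info() for a
 * "vdd" supply; property names follow the snprintf patterns above, while
 * the phandle and numeric values below are placeholders only:
 *
 *     vdd-supply = <&example_ldo>;
 *     qcom,vdd-always-on;
 *     qcom,vdd-lpm-sup;
 *     qcom,vdd-voltage-level = <2950000 2950000>;
 *     qcom,vdd-current-level = <200 800000>;
 */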
1526
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301527static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1528 struct sdhci_msm_pltfm_data *pdata)
1529{
1530 struct sdhci_pinctrl_data *pctrl_data;
1531 struct pinctrl *pctrl;
1532 int ret = 0;
1533
1534 /* Try to obtain pinctrl handle */
1535 pctrl = devm_pinctrl_get(dev);
1536 if (IS_ERR(pctrl)) {
1537 ret = PTR_ERR(pctrl);
1538 goto out;
1539 }
1540 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1541 if (!pctrl_data) {
1542 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1543 ret = -ENOMEM;
1544 goto out;
1545 }
1546 pctrl_data->pctrl = pctrl;
1547 /* Look-up and keep the states handy to be used later */
1548 pctrl_data->pins_active = pinctrl_lookup_state(
1549 pctrl_data->pctrl, "active");
1550 if (IS_ERR(pctrl_data->pins_active)) {
1551 ret = PTR_ERR(pctrl_data->pins_active);
1552 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1553 goto out;
1554 }
1555 pctrl_data->pins_sleep = pinctrl_lookup_state(
1556 pctrl_data->pctrl, "sleep");
1557 if (IS_ERR(pctrl_data->pins_sleep)) {
1558 ret = PTR_ERR(pctrl_data->pins_sleep);
1559 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1560 goto out;
1561 }
1562 pdata->pctrl_data = pctrl_data;
1563out:
1564 return ret;
1565}
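/*
 * The parser above expects the two standard pinctrl states looked up by
 * name; an illustrative node (phandles are placeholders) would carry:
 *
 *     pinctrl-names = "active", "sleep";
 *     pinctrl-0 = <&sdc_active>;
 *     pinctrl-1 = <&sdc_sleep>;
 */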
1566
Asutosh Das0ef24812012-12-18 16:14:02 +05301567#define GPIO_NAME_MAX_LEN 32
1568static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1569 struct sdhci_msm_pltfm_data *pdata)
1570{
1571 int ret = 0, cnt, i;
1572 struct sdhci_msm_pin_data *pin_data;
1573 struct device_node *np = dev->of_node;
1574
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301575 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1576 if (!ret) {
1577 goto out;
1578 } else if (ret == -EPROBE_DEFER) {
1579 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1580 goto out;
1581 } else {
1582 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1583 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301584 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301585 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301586 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1587 if (!pin_data) {
1588 dev_err(dev, "No memory for pin_data\n");
1589 ret = -ENOMEM;
1590 goto out;
1591 }
1592
1593 cnt = of_gpio_count(np);
1594 if (cnt > 0) {
1595 pin_data->gpio_data = devm_kzalloc(dev,
1596 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1597 if (!pin_data->gpio_data) {
1598 dev_err(dev, "No memory for gpio_data\n");
1599 ret = -ENOMEM;
1600 goto out;
1601 }
1602 pin_data->gpio_data->size = cnt;
1603 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1604 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1605
1606 if (!pin_data->gpio_data->gpio) {
1607 dev_err(dev, "No memory for gpio\n");
1608 ret = -ENOMEM;
1609 goto out;
1610 }
1611
1612 for (i = 0; i < cnt; i++) {
1613 const char *name = NULL;
1614 char result[GPIO_NAME_MAX_LEN];
1615 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1616 of_property_read_string_index(np,
1617 "qcom,gpio-names", i, &name);
1618
1619 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1620 dev_name(dev), name ? name : "?");
1621 pin_data->gpio_data->gpio[i].name = result;
1622 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1623 pin_data->gpio_data->gpio[i].name,
1624 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301625 }
1626 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301627 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301628out:
1629 if (ret)
1630 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1631 return ret;
1632}
1633
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001634#ifdef CONFIG_SMP
1635static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1636{
1637 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1638}
1639#else
1640static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1641#endif
1642
Gilad Bronerc788a672015-09-08 15:39:11 +03001643static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1644 struct sdhci_msm_pltfm_data *pdata)
1645{
1646 struct device_node *np = dev->of_node;
1647 const char *str;
1648 u32 cpu;
1649 int ret = 0;
1650 int i;
1651
1652 pdata->pm_qos_data.irq_valid = false;
1653 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1654 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1655 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001656 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001657 }
1658
1659 /* must specify cpu for "affine_cores" type */
1660 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1661 pdata->pm_qos_data.irq_cpu = -1;
1662 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1663 if (ret) {
1664 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1665 ret);
1666 goto out;
1667 }
1668 if (cpu < 0 || cpu >= num_possible_cpus()) {
1669 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1670 __func__, cpu, num_possible_cpus());
1671 ret = -EINVAL;
1672 goto out;
1673 }
1674 pdata->pm_qos_data.irq_cpu = cpu;
1675 }
1676
1677 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1678 SDHCI_POWER_POLICY_NUM) {
1679 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1680 __func__, SDHCI_POWER_POLICY_NUM);
1681 ret = -EINVAL;
1682 goto out;
1683 }
1684
1685 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1686 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1687 &pdata->pm_qos_data.irq_latency.latency[i]);
1688
1689 pdata->pm_qos_data.irq_valid = true;
1690out:
1691 return ret;
1692}
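/*
 * Illustrative IRQ PM QoS properties matching the parser above. The
 * values are placeholders: any irq-type string other than "affine_irq"
 * leaves the default PM_QOS_REQ_AFFINE_CORES, in which case the cpu
 * property is mandatory, and qcom,pm-qos-irq-latency must carry exactly
 * SDHCI_POWER_POLICY_NUM entries:
 *
 *     qcom,pm-qos-irq-type = "affine_cores";
 *     qcom,pm-qos-irq-cpu = <0>;
 *     qcom,pm-qos-irq-latency = <2 213>;
 */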
1693
1694static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1695 struct sdhci_msm_pltfm_data *pdata)
1696{
1697 struct device_node *np = dev->of_node;
1698 u32 mask;
1699 int nr_groups;
1700 int ret;
1701 int i;
1702
1703 /* Read cpu group mapping */
1704 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1705 if (nr_groups <= 0) {
1706 ret = -EINVAL;
1707 goto out;
1708 }
1709 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1710 pdata->pm_qos_data.cpu_group_map.mask =
1711 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1712 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1713 ret = -ENOMEM;
1714 goto out;
1715 }
1716
1717 for (i = 0; i < nr_groups; i++) {
1718 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1719 i, &mask);
1720
1721 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1722 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1723 cpu_possible_mask)) {
1724 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1725 __func__, mask, i);
1726 ret = -EINVAL;
1727 goto free_res;
1728 }
1729 }
1730 return 0;
1731
1732free_res:
1733 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1734out:
1735 return ret;
1736}
1737
1738static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1739 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1740{
1741 struct device_node *np = dev->of_node;
1742 struct sdhci_msm_pm_qos_latency *values;
1743 int ret;
1744 int i;
1745 int group;
1746 int cfg;
1747
1748 ret = of_property_count_u32_elems(np, name);
1749 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1750 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1751 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1752 ret);
1753 return -EINVAL;
1754 } else if (ret < 0) {
1755 return ret;
1756 }
1757
1758 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1759 GFP_KERNEL);
1760 if (!values)
1761 return -ENOMEM;
1762
1763 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1764 group = i / SDHCI_POWER_POLICY_NUM;
1765 cfg = i % SDHCI_POWER_POLICY_NUM;
1766 of_property_read_u32_index(np, name, i,
1767 &(values[group].latency[cfg]));
1768 }
1769
1770 *latency = values;
1771 return 0;
1772}
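/*
 * Illustrative CPU-group PM QoS properties for the two parsers above
 * (masks and latencies are placeholder values; each group contributes
 * SDHCI_POWER_POLICY_NUM latency entries):
 *
 *     qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *     qcom,pm-qos-legacy-latency-us = <2 213>, <2 213>;
 *     qcom,pm-qos-cmdq-latency-us = <2 213>, <2 213>;
 */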
1773
1774static void sdhci_msm_pm_qos_parse(struct device *dev,
1775 struct sdhci_msm_pltfm_data *pdata)
1776{
1777 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1778 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1779 __func__);
1780
1781 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1782 pdata->pm_qos_data.cmdq_valid =
1783 !sdhci_msm_pm_qos_parse_latency(dev,
1784 "qcom,pm-qos-cmdq-latency-us",
1785 pdata->pm_qos_data.cpu_group_map.nr_groups,
1786 &pdata->pm_qos_data.cmdq_latency);
1787 pdata->pm_qos_data.legacy_valid =
1788 !sdhci_msm_pm_qos_parse_latency(dev,
1789 "qcom,pm-qos-legacy-latency-us",
1790 pdata->pm_qos_data.cpu_group_map.nr_groups,
1791 &pdata->pm_qos_data.latency);
1792 if (!pdata->pm_qos_data.cmdq_valid &&
1793 !pdata->pm_qos_data.legacy_valid) {
1794 /* clean-up previously allocated arrays */
1795 kfree(pdata->pm_qos_data.latency);
1796 kfree(pdata->pm_qos_data.cmdq_latency);
1797 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1798 __func__);
1799 }
1800 } else {
1801 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1802 __func__);
1803 }
1804}
1805
Asutosh Das0ef24812012-12-18 16:14:02 +05301806/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001807static
1808struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1809 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301810{
1811 struct sdhci_msm_pltfm_data *pdata = NULL;
1812 struct device_node *np = dev->of_node;
1813 u32 bus_width = 0;
1814 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301815 int clk_table_len;
1816 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301817 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301818 const char *lower_bus_speed = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301819
1820 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1821 if (!pdata) {
1822 dev_err(dev, "failed to allocate memory for platform data\n");
1823 goto out;
1824 }
1825
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301826 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1827 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
1828 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301829
Asutosh Das0ef24812012-12-18 16:14:02 +05301830 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1831 if (bus_width == 8)
1832 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1833 else if (bus_width == 4)
1834 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1835 else {
1836 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1837 pdata->mmc_bus_width = 0;
1838 }
1839
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001840 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301841 &msm_host->mmc->clk_scaling.pltfm_freq_table,
1842 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001843 pr_debug("%s: no clock scaling frequencies were supplied\n",
1844 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301845 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
1846 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
1847 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001848
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301849 /*
1850 * A few hosts can support DDR52 mode at the same lower
1851 * system voltage corner as high-speed mode. In such cases,
1852 * it is always better to use DDR52 mode, which improves
1853 * performance without any power impact.
1854 */
1855 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
1856 &lower_bus_speed)) {
1857 if (!strcmp(lower_bus_speed, "DDR52"))
1858 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
1859 MMC_SCALING_LOWER_DDR52_MODE;
1860 }
1861
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301862 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1863 &clk_table, &clk_table_len, 0)) {
1864 dev_err(dev, "failed parsing supported clock rates\n");
1865 goto out;
1866 }
1867 if (!clk_table || !clk_table_len) {
1868 dev_err(dev, "Invalid clock table\n");
1869 goto out;
1870 }
1871 pdata->sup_clk_table = clk_table;
1872 pdata->sup_clk_cnt = clk_table_len;
1873
Asutosh Das0ef24812012-12-18 16:14:02 +05301874 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1875 sdhci_msm_slot_reg_data),
1876 GFP_KERNEL);
1877 if (!pdata->vreg_data) {
1878 dev_err(dev, "failed to allocate memory for vreg data\n");
1879 goto out;
1880 }
1881
1882 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1883 "vdd")) {
1884 dev_err(dev, "failed parsing vdd data\n");
1885 goto out;
1886 }
1887 if (sdhci_msm_dt_parse_vreg_info(dev,
1888 &pdata->vreg_data->vdd_io_data,
1889 "vdd-io")) {
1890 dev_err(dev, "failed parsing vdd-io data\n");
1891 goto out;
1892 }
1893
1894 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1895 dev_err(dev, "failed parsing gpio data\n");
1896 goto out;
1897 }
1898
Asutosh Das0ef24812012-12-18 16:14:02 +05301899 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1900
1901 for (i = 0; i < len; i++) {
1902 const char *name = NULL;
1903
1904 of_property_read_string_index(np,
1905 "qcom,bus-speed-mode", i, &name);
1906 if (!name)
1907 continue;
1908
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001909 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1910 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1911 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1912 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1913 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301914 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1915 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1916 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1917 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1918 pdata->caps |= MMC_CAP_1_8V_DDR
1919 | MMC_CAP_UHS_DDR50;
1920 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1921 pdata->caps |= MMC_CAP_1_2V_DDR
1922 | MMC_CAP_UHS_DDR50;
1923 }
1924
1925 if (of_get_property(np, "qcom,nonremovable", NULL))
1926 pdata->nonremovable = true;
1927
Guoping Yuf7c91332014-08-20 16:56:18 +08001928 if (of_get_property(np, "qcom,nonhotplug", NULL))
1929 pdata->nonhotplug = true;
1930
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001931 pdata->largeaddressbus =
1932 of_property_read_bool(np, "qcom,large-address-bus");
1933
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001934 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1935 msm_host->mmc->wakeup_on_idle = true;
1936
Gilad Bronerc788a672015-09-08 15:39:11 +03001937 sdhci_msm_pm_qos_parse(dev, pdata);
1938
Pavan Anamula5a256df2015-10-16 14:38:28 +05301939 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05301940 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05301941
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07001942 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
1943
Asutosh Das0ef24812012-12-18 16:14:02 +05301944 return pdata;
1945out:
1946 return NULL;
1947}
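/*
 * A minimal, illustrative node for sdhci_msm_populate_pdata() above;
 * every value is a placeholder rather than a specific target's DT:
 *
 *     qcom,bus-width = <8>;
 *     qcom,clk-rates = <400000 200000000>;
 *     qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
 *     qcom,nonremovable;
 */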
1948
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301949/* Returns required bandwidth in Bytes per Sec */
1950static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1951 struct mmc_ios *ios)
1952{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301953 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1954 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1955
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301956 unsigned int bw;
1957
Sahitya Tummala2886c922013-04-03 18:03:31 +05301958 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301959 /*
1960 * For DDR mode, the SDCC controller clock runs at double
1961 * the rate of the actual clock that goes to the card.
1962 */
1963 if (ios->bus_width == MMC_BUS_WIDTH_4)
1964 bw /= 2;
1965 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1966 bw /= 8;
1967
1968 return bw;
1969}
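/*
 * Worked example for sdhci_get_bw_required() (illustrative numbers):
 * with the controller clock at 200000000 Hz, an 8-bit bus requests
 * 200000000 Bytes/sec, a 4-bit bus 100000000 Bytes/sec and a 1-bit bus
 * 25000000 Bytes/sec.
 */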
1970
1971static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1972 unsigned int bw)
1973{
1974 unsigned int *table = host->pdata->voting_data->bw_vecs;
1975 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1976 int i;
1977
1978 if (host->msm_bus_vote.is_max_bw_needed && bw)
1979 return host->msm_bus_vote.max_bw_vote;
1980
1981 for (i = 0; i < size; i++) {
1982 if (bw <= table[i])
1983 break;
1984 }
1985
1986 if (i && (i == size))
1987 i--;
1988
1989 return i;
1990}
1991
1992/*
1993 * This function must be called with host lock acquired.
1994 * Caller of this function should also ensure that msm bus client
1995 * handle is not null.
1996 */
1997static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1998 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301999 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302000{
2001 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2002 int rc = 0;
2003
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302004 BUG_ON(!flags);
2005
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302006 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302007 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302008 rc = msm_bus_scale_client_update_request(
2009 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302010 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302011 if (rc) {
2012 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2013 mmc_hostname(host->mmc),
2014 msm_host->msm_bus_vote.client_handle, vote, rc);
2015 goto out;
2016 }
2017 msm_host->msm_bus_vote.curr_vote = vote;
2018 }
2019out:
2020 return rc;
2021}
2022
2023/*
2024 * Internal work. Work to set 0 bandwidth for msm bus.
2025 */
2026static void sdhci_msm_bus_work(struct work_struct *work)
2027{
2028 struct sdhci_msm_host *msm_host;
2029 struct sdhci_host *host;
2030 unsigned long flags;
2031
2032 msm_host = container_of(work, struct sdhci_msm_host,
2033 msm_bus_vote.vote_work.work);
2034 host = platform_get_drvdata(msm_host->pdev);
2035
2036 if (!msm_host->msm_bus_vote.client_handle)
2037 return;
2038
2039 spin_lock_irqsave(&host->lock, flags);
2040 /* don't vote for 0 bandwidth if any request is in progress */
2041 if (!host->mrq) {
2042 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302043 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302044 } else
2045 pr_warning("%s: %s: Transfer in progress, skipping bus voting to 0 bandwidth\n",
2046 mmc_hostname(host->mmc), __func__);
2047 spin_unlock_irqrestore(&host->lock, flags);
2048}
2049
2050/*
2051 * This function cancels any scheduled delayed work and sets the bus
2052 * vote based on bw (bandwidth) argument.
2053 */
2054static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2055 unsigned int bw)
2056{
2057 int vote;
2058 unsigned long flags;
2059 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2060 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2061
2062 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2063 spin_lock_irqsave(&host->lock, flags);
2064 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302065 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302066 spin_unlock_irqrestore(&host->lock, flags);
2067}
2068
2069#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2070
2071 /* This function queues work which will set the bandwidth requirement to 0 */
2072static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2073{
2074 unsigned long flags;
2075 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2076 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2077
2078 spin_lock_irqsave(&host->lock, flags);
2079 if (msm_host->msm_bus_vote.min_bw_vote !=
2080 msm_host->msm_bus_vote.curr_vote)
2081 queue_delayed_work(system_wq,
2082 &msm_host->msm_bus_vote.vote_work,
2083 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2084 spin_unlock_irqrestore(&host->lock, flags);
2085}
2086
2087static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2088 struct platform_device *pdev)
2089{
2090 int rc = 0;
2091 struct msm_bus_scale_pdata *bus_pdata;
2092
2093 struct sdhci_msm_bus_voting_data *data;
2094 struct device *dev = &pdev->dev;
2095
2096 data = devm_kzalloc(dev,
2097 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2098 if (!data) {
2099 dev_err(&pdev->dev,
2100 "%s: failed to allocate memory\n", __func__);
2101 rc = -ENOMEM;
2102 goto out;
2103 }
2104 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2105 if (data->bus_pdata) {
2106 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2107 &data->bw_vecs, &data->bw_vecs_size, 0);
2108 if (rc) {
2109 dev_err(&pdev->dev,
2110 "%s: Failed to get bus-bw-vectors-bps\n",
2111 __func__);
2112 goto out;
2113 }
2114 host->pdata->voting_data = data;
2115 }
2116 if (host->pdata->voting_data &&
2117 host->pdata->voting_data->bus_pdata &&
2118 host->pdata->voting_data->bw_vecs &&
2119 host->pdata->voting_data->bw_vecs_size) {
2120
2121 bus_pdata = host->pdata->voting_data->bus_pdata;
2122 host->msm_bus_vote.client_handle =
2123 msm_bus_scale_register_client(bus_pdata);
2124 if (!host->msm_bus_vote.client_handle) {
2125 dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2126 rc = -EFAULT;
2127 goto out;
2128 }
2129 /* cache the vote index for minimum and maximum bandwidth */
2130 host->msm_bus_vote.min_bw_vote =
2131 sdhci_msm_bus_get_vote_for_bw(host, 0);
2132 host->msm_bus_vote.max_bw_vote =
2133 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2134 } else {
2135 devm_kfree(dev, data);
2136 }
2137
2138out:
2139 return rc;
2140}
2141
2142static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2143{
2144 if (host->msm_bus_vote.client_handle)
2145 msm_bus_scale_unregister_client(
2146 host->msm_bus_vote.client_handle);
2147}
2148
2149static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2150{
2151 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2152 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2153 struct mmc_ios *ios = &host->mmc->ios;
2154 unsigned int bw;
2155
2156 if (!msm_host->msm_bus_vote.client_handle)
2157 return;
2158
2159 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302160 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302161 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302162 } else {
2163 /*
2164 * If clock gating is enabled, then remove the vote
2165 * immediately because clocks will be disabled only
2166 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2167 * additional delay is required to remove the bus vote.
2168 */
2169#ifdef CONFIG_MMC_CLKGATE
2170 if (host->mmc->clkgate_delay)
2171 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2172 else
2173#endif
2174 sdhci_msm_bus_queue_work(host);
2175 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302176}
2177
Asutosh Das0ef24812012-12-18 16:14:02 +05302178/* Regulator utility functions */
2179static int sdhci_msm_vreg_init_reg(struct device *dev,
2180 struct sdhci_msm_reg_data *vreg)
2181{
2182 int ret = 0;
2183
2184 /* check if the regulator is already initialized */
2185 if (vreg->reg)
2186 goto out;
2187
2188 /* Get the regulator handle */
2189 vreg->reg = devm_regulator_get(dev, vreg->name);
2190 if (IS_ERR(vreg->reg)) {
2191 ret = PTR_ERR(vreg->reg);
2192 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2193 __func__, vreg->name, ret);
2194 goto out;
2195 }
2196
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302197 if (regulator_count_voltages(vreg->reg) > 0) {
2198 vreg->set_voltage_sup = true;
2199 /* sanity check */
2200 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2201 pr_err("%s: %s invalid constraints specified\n",
2202 __func__, vreg->name);
2203 ret = -EINVAL;
2204 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302205 }
2206
2207out:
2208 return ret;
2209}
2210
2211static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2212{
2213 if (vreg->reg)
2214 devm_regulator_put(vreg->reg);
2215}
2216
2217static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2218 *vreg, int uA_load)
2219{
2220 int ret = 0;
2221
2222 /*
2223 * regulators that do not support regulator_set_voltage also
2224 * do not support regulator_set_optimum_mode
2225 */
2226 if (vreg->set_voltage_sup) {
2227 ret = regulator_set_load(vreg->reg, uA_load);
2228 if (ret < 0)
2229 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2230 __func__, vreg->name, uA_load, ret);
2231 else
2232 /*
2233 * regulator_set_load() can return non zero
2234 * value even for success case.
2235 */
2236 ret = 0;
2237 }
2238 return ret;
2239}
2240
2241static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2242 int min_uV, int max_uV)
2243{
2244 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302245 if (vreg->set_voltage_sup) {
2246 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2247 if (ret) {
2248 pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302249 __func__, vreg->name, min_uV, max_uV, ret);
2250 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302251 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302252
2253 return ret;
2254}
2255
2256static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2257{
2258 int ret = 0;
2259
2260 /* Put regulator in HPM (high power mode) */
2261 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2262 if (ret < 0)
2263 return ret;
2264
2265 if (!vreg->is_enabled) {
2266 /* Set voltage level */
2267 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2268 vreg->high_vol_level);
2269 if (ret)
2270 return ret;
2271 }
2272 ret = regulator_enable(vreg->reg);
2273 if (ret) {
2274 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2275 __func__, vreg->name, ret);
2276 return ret;
2277 }
2278 vreg->is_enabled = true;
2279 return ret;
2280}
2281
2282static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2283{
2284 int ret = 0;
2285
2286 /* Never disable regulator marked as always_on */
2287 if (vreg->is_enabled && !vreg->is_always_on) {
2288 ret = regulator_disable(vreg->reg);
2289 if (ret) {
2290 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2291 __func__, vreg->name, ret);
2292 goto out;
2293 }
2294 vreg->is_enabled = false;
2295
2296 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2297 if (ret < 0)
2298 goto out;
2299
2300 /* Set min. voltage level to 0 */
2301 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2302 if (ret)
2303 goto out;
2304 } else if (vreg->is_enabled && vreg->is_always_on) {
2305 if (vreg->lpm_sup) {
2306 /* Put always_on regulator in LPM (low power mode) */
2307 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2308 vreg->lpm_uA);
2309 if (ret < 0)
2310 goto out;
2311 }
2312 }
2313out:
2314 return ret;
2315}
2316
2317static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2318 bool enable, bool is_init)
2319{
2320 int ret = 0, i;
2321 struct sdhci_msm_slot_reg_data *curr_slot;
2322 struct sdhci_msm_reg_data *vreg_table[2];
2323
2324 curr_slot = pdata->vreg_data;
2325 if (!curr_slot) {
2326 pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2327 __func__);
2328 goto out;
2329 }
2330
2331 vreg_table[0] = curr_slot->vdd_data;
2332 vreg_table[1] = curr_slot->vdd_io_data;
2333
2334 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2335 if (vreg_table[i]) {
2336 if (enable)
2337 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2338 else
2339 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2340 if (ret)
2341 goto out;
2342 }
2343 }
2344out:
2345 return ret;
2346}
2347
2348/*
2349 * Reset vreg by ensuring it is off during probe. The call
2350 * to enable the vreg is needed to balance the subsequent disable.
2351 */
2352static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2353{
2354 int ret;
2355
2356 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2357 if (ret)
2358 return ret;
2359 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2360 return ret;
2361}
2362
2363/* This init function should be called only once for each SDHC slot */
2364static int sdhci_msm_vreg_init(struct device *dev,
2365 struct sdhci_msm_pltfm_data *pdata,
2366 bool is_init)
2367{
2368 int ret = 0;
2369 struct sdhci_msm_slot_reg_data *curr_slot;
2370 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2371
2372 curr_slot = pdata->vreg_data;
2373 if (!curr_slot)
2374 goto out;
2375
2376 curr_vdd_reg = curr_slot->vdd_data;
2377 curr_vdd_io_reg = curr_slot->vdd_io_data;
2378
2379 if (!is_init)
2380 /* Deregister all regulators from regulator framework */
2381 goto vdd_io_reg_deinit;
2382
2383 /*
2384 * Get the regulator handle from voltage regulator framework
2385 * and then try to set the voltage level for the regulator
2386 */
2387 if (curr_vdd_reg) {
2388 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2389 if (ret)
2390 goto out;
2391 }
2392 if (curr_vdd_io_reg) {
2393 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2394 if (ret)
2395 goto vdd_reg_deinit;
2396 }
2397 ret = sdhci_msm_vreg_reset(pdata);
2398 if (ret)
2399 dev_err(dev, "vreg reset failed (%d)\n", ret);
2400 goto out;
2401
2402vdd_io_reg_deinit:
2403 if (curr_vdd_io_reg)
2404 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2405vdd_reg_deinit:
2406 if (curr_vdd_reg)
2407 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2408out:
2409 return ret;
2410}
2411
2412
2413static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2414 enum vdd_io_level level,
2415 unsigned int voltage_level)
2416{
2417 int ret = 0;
2418 int set_level;
2419 struct sdhci_msm_reg_data *vdd_io_reg;
2420
2421 if (!pdata->vreg_data)
2422 return ret;
2423
2424 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2425 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2426 switch (level) {
2427 case VDD_IO_LOW:
2428 set_level = vdd_io_reg->low_vol_level;
2429 break;
2430 case VDD_IO_HIGH:
2431 set_level = vdd_io_reg->high_vol_level;
2432 break;
2433 case VDD_IO_SET_LEVEL:
2434 set_level = voltage_level;
2435 break;
2436 default:
2437 pr_err("%s: invalid argument level = %d\n",
2438 __func__, level);
2439 ret = -EINVAL;
2440 return ret;
2441 }
2442 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2443 set_level);
2444 }
2445 return ret;
2446}
2447
Ritesh Harjani42876f42015-11-17 17:46:51 +05302448/*
2449 * Acquire spin-lock host->lock before calling this function
2450 */
2451static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2452 bool enable)
2453{
2454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2455 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2456
2457 if (enable && !msm_host->is_sdiowakeup_enabled)
2458 enable_irq(msm_host->pdata->sdiowakeup_irq);
2459 else if (!enable && msm_host->is_sdiowakeup_enabled)
2460 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2461 else
2462 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2463 __func__, enable, msm_host->is_sdiowakeup_enabled);
2464 msm_host->is_sdiowakeup_enabled = enable;
2465}
2466
2467static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2468{
2469 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302470 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2471 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2472
Ritesh Harjani42876f42015-11-17 17:46:51 +05302473 unsigned long flags;
2474
2475 pr_debug("%s: irq (%d) received\n", __func__, irq);
2476
2477 spin_lock_irqsave(&host->lock, flags);
2478 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2479 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302480 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302481
2482 return IRQ_HANDLED;
2483}
2484
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302485void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2486{
2487 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2488 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302489 const struct sdhci_msm_offset *msm_host_offset =
2490 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302491
2492 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2493 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302494 sdhci_msm_readl_relaxed(host,
2495 msm_host_offset->CORE_PWRCTL_STATUS),
2496 sdhci_msm_readl_relaxed(host,
2497 msm_host_offset->CORE_PWRCTL_MASK),
2498 sdhci_msm_readl_relaxed(host,
2499 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302500}
2501
Asutosh Das0ef24812012-12-18 16:14:02 +05302502static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2503{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002504 struct sdhci_host *host = (struct sdhci_host *)data;
2505 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2506 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302507 const struct sdhci_msm_offset *msm_host_offset =
2508 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302509 u8 irq_status = 0;
2510 u8 irq_ack = 0;
2511 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302512 int pwr_state = 0, io_level = 0;
2513 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302514 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302515
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302516 irq_status = sdhci_msm_readb_relaxed(host,
2517 msm_host_offset->CORE_PWRCTL_STATUS);
2518
Asutosh Das0ef24812012-12-18 16:14:02 +05302519 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2520 mmc_hostname(msm_host->mmc), irq, irq_status);
2521
2522 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302523 sdhci_msm_writeb_relaxed(irq_status, host,
2524 msm_host_offset->CORE_PWRCTL_CLEAR);
2525
Asutosh Das0ef24812012-12-18 16:14:02 +05302526 /*
2527 * SDHC has core_mem and hc_mem device memory and these memory
2528 * addresses do not fall within 1KB region. Hence, any update to
2529 * core_mem address space would require an mb() to ensure this gets
2530 * completed before its next update to registers within hc_mem.
2531 */
2532 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302533 /*
2534 * There is a rare HW scenario where the first clear pulse could be
2535 * lost when the actual reset and the clear/read of the status register
2536 * happen at the same time. Hence, retry up to 10 times to make
2537 * sure the status register is cleared. Otherwise, this will result in
2538 * a spurious power IRQ resulting in system instability.
2539 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302540 while (irq_status & sdhci_msm_readb_relaxed(host,
2541 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302542 if (retry == 0) {
2543 pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
2544 mmc_hostname(host->mmc), irq_status);
2545 sdhci_msm_dump_pwr_ctrl_regs(host);
2546 BUG_ON(1);
2547 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302548 sdhci_msm_writeb_relaxed(irq_status, host,
2549 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302550 retry--;
2551 udelay(10);
2552 }
2553 if (likely(retry < 10))
2554 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2555 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302556
2557 /* Handle BUS ON/OFF*/
2558 if (irq_status & CORE_PWRCTL_BUS_ON) {
2559 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302560 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302561 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302562 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2563 VDD_IO_HIGH, 0);
2564 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302565 if (ret)
2566 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2567 else
2568 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302569
2570 pwr_state = REQ_BUS_ON;
2571 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302572 }
2573 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2574 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302575 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302576 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302577 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2578 VDD_IO_LOW, 0);
2579 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302580 if (ret)
2581 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2582 else
2583 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302584
2585 pwr_state = REQ_BUS_OFF;
2586 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302587 }
2588 /* Handle IO LOW/HIGH */
2589 if (irq_status & CORE_PWRCTL_IO_LOW) {
2590 /* Switch voltage Low */
2591 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2592 if (ret)
2593 irq_ack |= CORE_PWRCTL_IO_FAIL;
2594 else
2595 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302596
2597 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302598 }
2599 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2600 /* Switch voltage High */
2601 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2602 if (ret)
2603 irq_ack |= CORE_PWRCTL_IO_FAIL;
2604 else
2605 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302606
2607 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302608 }
2609
2610 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302611 sdhci_msm_writeb_relaxed(irq_ack, host,
2612 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302613 /*
2614 * SDHC has core_mem and hc_mem device memory and these memory
2615 * addresses do not fall within 1KB region. Hence, any update to
2616 * core_mem address space would require an mb() to ensure this gets
2617 * completed before its next update to registers within hc_mem.
2618 */
2619 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302620 if ((io_level & REQ_IO_HIGH) &&
2621 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2622 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302623 writel_relaxed((readl_relaxed(host->ioaddr +
2624 msm_host_offset->CORE_VENDOR_SPEC) &
2625 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2626 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002627 else if ((io_level & REQ_IO_LOW) ||
2628 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302629 writel_relaxed((readl_relaxed(host->ioaddr +
2630 msm_host_offset->CORE_VENDOR_SPEC) |
2631 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2632 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002633 mb();
2634
Asutosh Das0ef24812012-12-18 16:14:02 +05302635 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2636 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302637 spin_lock_irqsave(&host->lock, flags);
2638 if (pwr_state)
2639 msm_host->curr_pwr_state = pwr_state;
2640 if (io_level)
2641 msm_host->curr_io_level = io_level;
2642 complete(&msm_host->pwr_irq_completion);
2643 spin_unlock_irqrestore(&host->lock, flags);
2644
Asutosh Das0ef24812012-12-18 16:14:02 +05302645 return IRQ_HANDLED;
2646}
2647
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302648static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302649show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2650{
2651 struct sdhci_host *host = dev_get_drvdata(dev);
2652 int poll;
2653 unsigned long flags;
2654
2655 spin_lock_irqsave(&host->lock, flags);
2656 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2657 spin_unlock_irqrestore(&host->lock, flags);
2658
2659 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2660}
2661
2662static ssize_t
2663store_polling(struct device *dev, struct device_attribute *attr,
2664 const char *buf, size_t count)
2665{
2666 struct sdhci_host *host = dev_get_drvdata(dev);
2667 int value;
2668 unsigned long flags;
2669
2670 if (!kstrtou32(buf, 0, &value)) {
2671 spin_lock_irqsave(&host->lock, flags);
2672 if (value) {
2673 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2674 mmc_detect_change(host->mmc, 0);
2675 } else {
2676 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2677 }
2678 spin_unlock_irqrestore(&host->lock, flags);
2679 }
2680 return count;
2681}
2682
2683static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302684show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2685 char *buf)
2686{
2687 struct sdhci_host *host = dev_get_drvdata(dev);
2688 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2689 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2690
2691 return snprintf(buf, PAGE_SIZE, "%u\n",
2692 msm_host->msm_bus_vote.is_max_bw_needed);
2693}
2694
2695static ssize_t
2696store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2697 const char *buf, size_t count)
2698{
2699 struct sdhci_host *host = dev_get_drvdata(dev);
2700 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2701 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2702 uint32_t value;
2703 unsigned long flags;
2704
2705 if (!kstrtou32(buf, 0, &value)) {
2706 spin_lock_irqsave(&host->lock, flags);
2707 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2708 spin_unlock_irqrestore(&host->lock, flags);
2709 }
2710 return count;
2711}
2712
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302713static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302714{
2715 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2716 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302717 const struct sdhci_msm_offset *msm_host_offset =
2718 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302719 unsigned long flags;
2720 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302721 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302722
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302723 spin_lock_irqsave(&host->lock, flags);
2724 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2725 mmc_hostname(host->mmc), __func__, req_type,
2726 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302727 if (!msm_host->mci_removed)
2728 io_sig_sts = sdhci_msm_readl_relaxed(host,
2729 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302730
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302731 /*
2732 * The IRQ for request type IO High/Low will be generated when -
2733 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2734 * 2. If (1) is true and there is a state change in the 1.8V enable
2735 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2736 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2737 * layer tries to set it to 3.3V before card detection happens, the
2738 * IRQ doesn't get triggered as there is no state change in this bit.
2739 * The driver already handles this case by changing the IO voltage
2740 * level to high as part of controller power up sequence. Hence, check
2741 * for host->pwr to handle a case where IO voltage high request is
2742 * issued even before controller power up.
2743 */
2744 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2745 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2746 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2747 pr_debug("%s: do not wait for power IRQ that never comes\n",
2748 mmc_hostname(host->mmc));
2749 spin_unlock_irqrestore(&host->lock, flags);
2750 return;
2751 }
2752 }
2753
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302754 if ((req_type & msm_host->curr_pwr_state) ||
2755 (req_type & msm_host->curr_io_level))
2756 done = true;
2757 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302758
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302759 /*
2760 * This is needed here to handle a case where the IRQ gets
2761 * triggered even before this function is called so that
2762 * x->done counter of completion gets reset. Otherwise,
2763 * next call to wait_for_completion returns immediately
2764 * without actually waiting for the IRQ to be handled.
2765 */
2766 if (done)
2767 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302768 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
2769 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
2770 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2771 mmc_hostname(host->mmc), req_type);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302772
2773 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2774 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302775}
2776
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002777static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2778{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302779 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2780 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2781 const struct sdhci_msm_offset *msm_host_offset =
2782 msm_host->offset;
2783 u32 config = readl_relaxed(host->ioaddr +
2784 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302785
2786 if (enable) {
2787 config |= CORE_CDR_EN;
2788 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302789 writel_relaxed(config, host->ioaddr +
2790 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302791 } else {
2792 config &= ~CORE_CDR_EN;
2793 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302794 writel_relaxed(config, host->ioaddr +
2795 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302796 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002797}
2798
Asutosh Das648f9d12013-01-10 21:11:04 +05302799static unsigned int sdhci_msm_max_segs(void)
2800{
2801 return SDHCI_MSM_MAX_SEGMENTS;
2802}
2803
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302804static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302805{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302806 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2807 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302808
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302809 return msm_host->pdata->sup_clk_table[0];
2810}
2811
2812static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2813{
2814 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2815 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2816 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2817
2818 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2819}
2820
2821static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2822 u32 req_clk)
2823{
2824 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2825 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2826 unsigned int sel_clk = -1;
2827 unsigned char cnt;
2828
2829 if (req_clk < sdhci_msm_get_min_clock(host)) {
2830 sel_clk = sdhci_msm_get_min_clock(host);
2831 return sel_clk;
2832 }
2833
2834 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2835 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2836 break;
2837 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2838 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2839 break;
2840 } else {
2841 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2842 }
2843 }
2844 return sel_clk;
2845}
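/*
 * Worked example for sdhci_msm_get_sup_clk_rate() (illustrative table):
 * with sup_clk_table = <400000 25000000 50000000 100000000 200000000>
 * and req_clk = 150000000, the loop breaks at 200000000 (> req_clk) and
 * the last smaller entry, 100000000, is returned. Requests below the
 * table minimum return sdhci_msm_get_min_clock().
 */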
2846
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302847static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2848{
2849 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2850 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2851 int rc = 0;
2852
2853 if (atomic_read(&msm_host->controller_clock))
2854 return 0;
2855
2856 sdhci_msm_bus_voting(host, 1);
2857
2858 if (!IS_ERR(msm_host->pclk)) {
2859 rc = clk_prepare_enable(msm_host->pclk);
2860 if (rc) {
2861 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2862 mmc_hostname(host->mmc), __func__, rc);
2863 goto remove_vote;
2864 }
2865 }
2866
2867 rc = clk_prepare_enable(msm_host->clk);
2868 if (rc) {
2869 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2870 mmc_hostname(host->mmc), __func__, rc);
2871 goto disable_pclk;
2872 }
2873
2874 atomic_set(&msm_host->controller_clock, 1);
2875 pr_debug("%s: %s: enabled controller clock\n",
2876 mmc_hostname(host->mmc), __func__);
2877 goto out;
2878
2879disable_pclk:
2880 if (!IS_ERR(msm_host->pclk))
2881 clk_disable_unprepare(msm_host->pclk);
2882remove_vote:
2883 if (msm_host->msm_bus_vote.client_handle)
2884 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2885out:
2886 return rc;
2887}
2888
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302889static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
2890{
2891 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2892 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302893
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302894 if (atomic_read(&msm_host->controller_clock)) {
2895 if (!IS_ERR(msm_host->clk))
2896 clk_disable_unprepare(msm_host->clk);
2897 if (!IS_ERR(msm_host->pclk))
2898 clk_disable_unprepare(msm_host->pclk);
2899 if (!IS_ERR(msm_host->ice_clk))
2900 clk_disable_unprepare(msm_host->ice_clk);
2901 sdhci_msm_bus_voting(host, 0);
2902 atomic_set(&msm_host->controller_clock, 0);
2903 pr_debug("%s: %s: disabled controller clock\n",
2904 mmc_hostname(host->mmc), __func__);
2905 }
2906}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302907
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302908static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2909{
2910 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2911 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2912 int rc = 0;
2913
2914 if (enable && !atomic_read(&msm_host->clks_on)) {
2915 pr_debug("%s: request to enable clocks\n",
2916 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302917
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302918 /*
2919 * The bus-width or the clock rate might have changed
2920	 * after controller clocks are enabled, so update the bus vote
2921	 * in such a case.
2922 */
2923 if (atomic_read(&msm_host->controller_clock))
2924 sdhci_msm_bus_voting(host, 1);
2925
2926 rc = sdhci_msm_enable_controller_clock(host);
2927 if (rc)
2928 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302929
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302930 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2931 rc = clk_prepare_enable(msm_host->bus_clk);
2932 if (rc) {
2933 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2934 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302935 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302936 }
2937 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002938 if (!IS_ERR(msm_host->ff_clk)) {
2939 rc = clk_prepare_enable(msm_host->ff_clk);
2940 if (rc) {
2941 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2942 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302943 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002944 }
2945 }
2946 if (!IS_ERR(msm_host->sleep_clk)) {
2947 rc = clk_prepare_enable(msm_host->sleep_clk);
2948 if (rc) {
2949 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2950 mmc_hostname(host->mmc), __func__, rc);
2951 goto disable_ff_clk;
2952 }
2953 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302954 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302955
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302956 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302957 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2958 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05302959 /*
2960 * During 1.8V signal switching the clock source must
2961 * still be ON as it requires accessing SDHC
2962	 * registers (SDHCI Host Control2 register bit 3 must
2963 * be written and polled after stopping the SDCLK).
2964 */
2965 if (host->mmc->card_clock_off)
2966 return 0;
2967 pr_debug("%s: request to disable clocks\n",
2968 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002969 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2970 clk_disable_unprepare(msm_host->sleep_clk);
2971 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2972 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302973 clk_disable_unprepare(msm_host->clk);
2974 if (!IS_ERR(msm_host->pclk))
2975 clk_disable_unprepare(msm_host->pclk);
2976 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2977 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302978
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302979 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302980 sdhci_msm_bus_voting(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302981 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302982 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302983 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002984disable_ff_clk:
2985 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2986 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302987disable_bus_clk:
2988 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2989 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302990disable_controller_clk:
2991 if (!IS_ERR_OR_NULL(msm_host->clk))
2992 clk_disable_unprepare(msm_host->clk);
2993 if (!IS_ERR_OR_NULL(msm_host->pclk))
2994 clk_disable_unprepare(msm_host->pclk);
2995 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302996remove_vote:
2997 if (msm_host->msm_bus_vote.client_handle)
2998 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302999out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303000 return rc;
3001}
3002
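/*
 * Set the card clock: pick the closest supported source rate (doubled
 * for DDR/HS400 timings), program the vendor specific MCLK select and
 * power-save bits, and refresh the bus vote when the rate changes.
 */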
3003static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3004{
3005 int rc;
3006 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3007 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303008 const struct sdhci_msm_offset *msm_host_offset =
3009 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003010 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303011 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003012 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303013 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303014
3015 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303016 /*
3017	 * Disable pwrsave to ensure the clock is not auto-gated until
3018	 * the rate is >400 kHz (initialization complete).
3019 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303020 writel_relaxed(readl_relaxed(host->ioaddr +
3021 msm_host_offset->CORE_VENDOR_SPEC) &
3022 ~CORE_CLK_PWRSAVE, host->ioaddr +
3023 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303024 sdhci_msm_prepare_clocks(host, false);
3025 host->clock = clock;
3026 goto out;
3027 }
3028
3029 rc = sdhci_msm_prepare_clocks(host, true);
3030 if (rc)
3031 goto out;
3032
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303033 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3034 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303035 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003036 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303037 writel_relaxed(readl_relaxed(host->ioaddr +
3038 msm_host_offset->CORE_VENDOR_SPEC)
3039 | CORE_CLK_PWRSAVE, host->ioaddr +
3040 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303041 /*
3042	 * Disable pwrsave for a newly added card if it doesn't allow
3043	 * clock gating.
3044 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003045 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303046 writel_relaxed(readl_relaxed(host->ioaddr +
3047 msm_host_offset->CORE_VENDOR_SPEC)
3048 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3049 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303050
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303051 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003052 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003053 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003054 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303055 /*
3056	 * The SDHC requires the internal clock frequency to be double the
3057	 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003058	 * uses the faster clock (100/400 MHz) for some of its parts and
3059	 * sends the actual required clock (50/200 MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303060 */
3061 ddr_clock = clock * 2;
3062 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3063 ddr_clock);
3064 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003065
3066 /*
3067 * In general all timing modes are controlled via UHS mode select in
3068	 * Host Control2 register. eMMC-specific HS200/HS400 don't have
3069	 * their respective modes defined here, hence we use these values:
3070 *
3071	 * HS200 - SDR104 (since both are functionally equivalent)
3072 * HS400 - This involves multiple configurations
3073 * Initially SDR104 - when tuning is required as HS200
3074 * Then when switching to DDR @ 400MHz (HS400) we use
3075 * the vendor specific HC_SELECT_IN to control the mode.
3076 *
3077 * In addition to controlling the modes we also need to select the
3078 * correct input clock for DLL depending on the mode.
3079 *
3080 * HS400 - divided clock (free running MCLK/2)
3081 * All other modes - default (free running MCLK)
3082 */
3083 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3084 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303085 writel_relaxed(((readl_relaxed(host->ioaddr +
3086 msm_host_offset->CORE_VENDOR_SPEC)
3087 & ~CORE_HC_MCLK_SEL_MASK)
3088 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3089 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003090 /*
3091 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3092 * register
3093 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303094 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003095 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303096 msm_host->enhanced_strobe)) &&
3097 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003098 /*
3099 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3100 * field in VENDOR_SPEC_FUNC
3101 */
3102 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303103 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003104 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303105 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3106 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003107 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003108 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3109 /*
3110 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3111 * CORE_DLL_STATUS to be set. This should get set
3112	 * within 15 us at 200 MHz.
3113 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303114 rc = readl_poll_timeout(host->ioaddr +
3115 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003116 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3117 CORE_DDR_DLL_LOCK)), 10, 1000);
3118 if (rc == -ETIMEDOUT)
3119 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3120 mmc_hostname(host->mmc),
3121 dll_lock);
3122 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003123 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003124 if (!msm_host->use_cdclp533)
3125 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3126 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303127 msm_host_offset->CORE_VENDOR_SPEC3)
3128 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3129 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003130
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003131 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303132 writel_relaxed(((readl_relaxed(host->ioaddr +
3133 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003134 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303135 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3136 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003137
3138 /*
3139 * Disable HC_SELECT_IN to be able to use the UHS mode select
3140 * configuration from Host Control2 register for all other
3141 * modes.
3142 *
3143 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3144 * in VENDOR_SPEC_FUNC
3145 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303146 writel_relaxed((readl_relaxed(host->ioaddr +
3147 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003148 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303149 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3150 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003151 }
3152 mb();
3153
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303154 if (sup_clock != msm_host->clk_rate) {
3155 pr_debug("%s: %s: setting clk rate to %u\n",
3156 mmc_hostname(host->mmc), __func__, sup_clock);
3157 rc = clk_set_rate(msm_host->clk, sup_clock);
3158 if (rc) {
3159 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3160 mmc_hostname(host->mmc), __func__,
3161 sup_clock, rc);
3162 goto out;
3163 }
3164 msm_host->clk_rate = sup_clock;
3165 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303166 /*
3167 * Update the bus vote in case of frequency change due to
3168 * clock scaling.
3169 */
3170 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303171 }
3172out:
3173 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303174}
3175
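/*
 * Program the UHS mode select field of Host Control2 for the requested
 * timing, and keep the DLL in reset/power-down for clocks at or below
 * 100 MHz where tuning is not required.
 */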
Sahitya Tummala14613432013-03-21 11:13:25 +05303176static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3177 unsigned int uhs)
3178{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003179 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3180 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303181 const struct sdhci_msm_offset *msm_host_offset =
3182 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303183 u16 ctrl_2;
3184
3185 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3186 /* Select Bus Speed Mode for host */
3187 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003188 if ((uhs == MMC_TIMING_MMC_HS400) ||
3189 (uhs == MMC_TIMING_MMC_HS200) ||
3190 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303191 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3192 else if (uhs == MMC_TIMING_UHS_SDR12)
3193 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3194 else if (uhs == MMC_TIMING_UHS_SDR25)
3195 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3196 else if (uhs == MMC_TIMING_UHS_SDR50)
3197 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003198 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3199 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303200 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303201 /*
3202	 * When the clock frequency is less than 100 MHz, the feedback clock must
3203	 * be provided and the DLL must not be used so that tuning can be skipped.
3204	 * To provide the feedback clock, the mode selection can be any value less
3205 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3206 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003207 if (host->clock <= CORE_FREQ_100MHZ) {
3208 if ((uhs == MMC_TIMING_MMC_HS400) ||
3209 (uhs == MMC_TIMING_MMC_HS200) ||
3210 (uhs == MMC_TIMING_UHS_SDR104))
3211 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303212
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003213 /*
3214 * Make sure DLL is disabled when not required
3215 *
3216 * Write 1 to DLL_RST bit of DLL_CONFIG register
3217 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303218 writel_relaxed((readl_relaxed(host->ioaddr +
3219 msm_host_offset->CORE_DLL_CONFIG)
3220 | CORE_DLL_RST), host->ioaddr +
3221 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003222
3223 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303224 writel_relaxed((readl_relaxed(host->ioaddr +
3225 msm_host_offset->CORE_DLL_CONFIG)
3226 | CORE_DLL_PDN), host->ioaddr +
3227 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003228 mb();
3229
3230 /*
3231 * The DLL needs to be restored and CDCLP533 recalibrated
3232 * when the clock frequency is set back to 400MHz.
3233	 * when the clock frequency is set back to 400 MHz.
3234 msm_host->calibration_done = false;
3235 }
3236
3237 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3238 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303239 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3240
3241}
3242
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003243#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003244#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303245static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003246{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303247 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303248 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3249 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303250 const struct sdhci_msm_offset *msm_host_offset =
3251 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303252 struct cmdq_host *cq_host = host->cq_host;
3253
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303254 u32 version = sdhci_msm_readl_relaxed(host,
3255 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003256 u16 minor = version & CORE_VERSION_TARGET_MASK;
3257 /* registers offset changed starting from 4.2.0 */
3258 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3259
3260 pr_err("---- Debug RAM dump ----\n");
3261 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3262 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3263 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3264
3265 while (i < 16) {
3266 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3267 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3268 i++;
3269 }
3270 pr_err("-------------------------\n");
3271}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303272
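/*
 * Snapshot the mmc_host, mmc_card and sdhci_host structures so their
 * state at the time of the register dump is preserved for later
 * inspection.
 */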
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303273static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3274{
3275 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3276 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3277 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3278
3279 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3280 sizeof(struct mmc_host));
3281 if (msm_host->mmc->card)
3282 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3283 sizeof(struct mmc_card));
3284 memcpy(&cached_data->copy_host, host,
3285 sizeof(struct sdhci_host));
3286}
3287
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303288void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3289{
3290 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3291 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303292 const struct sdhci_msm_offset *msm_host_offset =
3293 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303294 int tbsel, tbsel2;
3295 int i, index = 0;
3296 u32 test_bus_val = 0;
3297 u32 debug_reg[MAX_TEST_BUS] = {0};
3298
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303299 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303300 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003301 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303302 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003303
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303304 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3305 sdhci_msm_readl_relaxed(host,
3306 msm_host_offset->CORE_MCI_DATA_CNT),
3307 sdhci_msm_readl_relaxed(host,
3308 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303309 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303310 sdhci_msm_readl_relaxed(host,
3311 msm_host_offset->CORE_MCI_DATA_CNT),
3312 sdhci_msm_readl_relaxed(host,
3313 msm_host_offset->CORE_MCI_FIFO_CNT),
3314 sdhci_msm_readl_relaxed(host,
3315 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303316 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303317 readl_relaxed(host->ioaddr +
3318 msm_host_offset->CORE_DLL_CONFIG),
3319 readl_relaxed(host->ioaddr +
3320 msm_host_offset->CORE_DLL_STATUS),
3321 sdhci_msm_readl_relaxed(host,
3322 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303323 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303324 readl_relaxed(host->ioaddr +
3325 msm_host_offset->CORE_VENDOR_SPEC),
3326 readl_relaxed(host->ioaddr +
3327 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3328 readl_relaxed(host->ioaddr +
3329 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303330 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303331 readl_relaxed(host->ioaddr +
3332 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303333
3334 /*
3335 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3336 * of CORE_TESTBUS_CONFIG register.
3337 *
3338 * To select test bus 0 to 7 use tbsel and to select any test bus
3339	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3340 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3341 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3342 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003343 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303344 for (tbsel = 0; tbsel < 8; tbsel++) {
3345 if (index >= MAX_TEST_BUS)
3346 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303347 test_bus_val =
3348 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3349 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3350 sdhci_msm_writel_relaxed(test_bus_val, host,
3351 msm_host_offset->CORE_TESTBUS_CONFIG);
3352 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3353 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303354 }
3355 }
3356 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3357 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3358 i, i + 3, debug_reg[i], debug_reg[i+1],
3359 debug_reg[i+2], debug_reg[i+3]);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003360}
3361
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303362/*
3363 * sdhci_msm_enhanced_strobe_mask :-
3364 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3365 * SW should write 3 to
3366 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3367 * The default reset value of this register is 2.
3368 */
3369static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3370{
3371 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3372 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303373 const struct sdhci_msm_offset *msm_host_offset =
3374 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303375
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303376 if (!msm_host->enhanced_strobe ||
3377 !mmc_card_strobe(msm_host->mmc->card)) {
3378 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303379 mmc_hostname(host->mmc));
3380 return;
3381 }
3382
3383 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303384 writel_relaxed((readl_relaxed(host->ioaddr +
3385 msm_host_offset->CORE_VENDOR_SPEC3)
3386 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3387 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303388 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303389 writel_relaxed((readl_relaxed(host->ioaddr +
3390 msm_host_offset->CORE_VENDOR_SPEC3)
3391 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3392 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303393 }
3394}
3395
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003396static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3397{
3398 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3399 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303400 const struct sdhci_msm_offset *msm_host_offset =
3401 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003402
3403 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303404 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3405 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003406 } else {
3407 u32 value;
3408
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303409 value = sdhci_msm_readl_relaxed(host,
3410 msm_host_offset->CORE_TESTBUS_CONFIG);
3411 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3412 sdhci_msm_writel_relaxed(value, host,
3413 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003414 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303415}
3416
Pavan Anamula691dd592015-08-25 16:11:20 +05303417void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3418{
3419 u32 vendor_func2;
3420 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303421 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3422 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3423 const struct sdhci_msm_offset *msm_host_offset =
3424 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303425
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303426 vendor_func2 = readl_relaxed(host->ioaddr +
3427 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303428
3429 if (enable) {
3430 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303431 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303432 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303433 while (readl_relaxed(host->ioaddr +
3434 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303435 if (timeout == 0) {
3436 pr_info("%s: Applying wait idle disable workaround\n",
3437 mmc_hostname(host->mmc));
3438 /*
3439 * Apply the reset workaround to not wait for
3440 * pending data transfers on AXI before
3441 * resetting the controller. This could be
3442 * risky if the transfers were stuck on the
3443 * AXI bus.
3444 */
3445 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303446 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303447 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303448 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3449 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303450 host->reset_wa_t = ktime_get();
3451 return;
3452 }
3453 timeout--;
3454 udelay(10);
3455 }
3456 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3457 mmc_hostname(host->mmc));
3458 } else {
3459 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303460 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303461 }
3462}
3463
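/*
 * Deferred IRQ PM QoS unvote: fall back to the default latency unless
 * new votes arrived while the work was pending.
 */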
Gilad Broner44445992015-09-29 16:05:39 +03003464static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3465{
3466 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303467 container_of(work, struct sdhci_msm_pm_qos_irq,
3468 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003469
3470 if (atomic_read(&pm_qos_irq->counter))
3471 return;
3472
3473 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3474 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3475}
3476
3477void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3478{
3479 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3480 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3481 struct sdhci_msm_pm_qos_latency *latency =
3482 &msm_host->pdata->pm_qos_data.irq_latency;
3483 int counter;
3484
3485 if (!msm_host->pm_qos_irq.enabled)
3486 return;
3487
3488 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3489 /* Make sure to update the voting in case power policy has changed */
3490 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3491 && counter > 1)
3492 return;
3493
Asutosh Das36c2e922015-12-01 12:19:58 +05303494 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003495 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3496 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3497 msm_host->pm_qos_irq.latency);
3498}
3499
3500void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3501{
3502 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3503 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3504 int counter;
3505
3506 if (!msm_host->pm_qos_irq.enabled)
3507 return;
3508
Subhash Jadavani4d813902015-10-15 12:16:43 -07003509 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3510 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3511 } else {
3512 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3513 return;
Gilad Broner44445992015-09-29 16:05:39 +03003514 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003515
Gilad Broner44445992015-09-29 16:05:39 +03003516 if (counter)
3517 return;
3518
3519 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303520 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3521 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003522 return;
3523 }
3524
3525 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3526 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3527 msm_host->pm_qos_irq.latency);
3528}
3529
Gilad Broner68c54562015-09-20 11:59:46 +03003530static ssize_t
3531sdhci_msm_pm_qos_irq_show(struct device *dev,
3532 struct device_attribute *attr, char *buf)
3533{
3534 struct sdhci_host *host = dev_get_drvdata(dev);
3535 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3536 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3537 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3538
3539 return snprintf(buf, PAGE_SIZE,
3540 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3541 irq->enabled, atomic_read(&irq->counter), irq->latency);
3542}
3543
3544static ssize_t
3545sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3546 struct device_attribute *attr, char *buf)
3547{
3548 struct sdhci_host *host = dev_get_drvdata(dev);
3549 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3550 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3551
3552 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3553}
3554
3555static ssize_t
3556sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3557 struct device_attribute *attr, const char *buf, size_t count)
3558{
3559 struct sdhci_host *host = dev_get_drvdata(dev);
3560 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3561 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3562 uint32_t value;
3563 bool enable;
3564 int ret;
3565
3566 ret = kstrtou32(buf, 0, &value);
3567 if (ret)
3568 goto out;
3569 enable = !!value;
3570
3571 if (enable == msm_host->pm_qos_irq.enabled)
3572 goto out;
3573
3574 msm_host->pm_qos_irq.enabled = enable;
3575 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303576 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003577 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3578 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3579 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3580 msm_host->pm_qos_irq.latency);
3581 }
3582
3583out:
3584 return count;
3585}
3586
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003587#ifdef CONFIG_SMP
3588static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3589 struct sdhci_host *host)
3590{
3591 msm_host->pm_qos_irq.req.irq = host->irq;
3592}
3593#else
3594static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3595 struct sdhci_host *host) { }
3596#endif
3597
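/*
 * One-time setup of the IRQ PM QoS request and its sysfs controls,
 * using the latency values supplied through platform data.
 */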
Gilad Broner44445992015-09-29 16:05:39 +03003598void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3599{
3600 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3601 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3602 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003603 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003604
3605 if (!msm_host->pdata->pm_qos_data.irq_valid)
3606 return;
3607
3608 /* Initialize only once as this gets called per partition */
3609 if (msm_host->pm_qos_irq.enabled)
3610 return;
3611
3612 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3613 msm_host->pm_qos_irq.req.type =
3614 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003615 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3616 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3617 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003618 else
3619 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3620 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3621
Asutosh Das36c2e922015-12-01 12:19:58 +05303622 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003623 sdhci_msm_pm_qos_irq_unvote_work);
3624	/* For the initialization phase, set the performance mode latency */
3625 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3626 msm_host->pm_qos_irq.latency =
3627 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3628 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3629 msm_host->pm_qos_irq.latency);
3630 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003631
3632 /* sysfs */
3633 msm_host->pm_qos_irq.enable_attr.show =
3634 sdhci_msm_pm_qos_irq_enable_show;
3635 msm_host->pm_qos_irq.enable_attr.store =
3636 sdhci_msm_pm_qos_irq_enable_store;
3637 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3638 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3639 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3640 ret = device_create_file(&msm_host->pdev->dev,
3641 &msm_host->pm_qos_irq.enable_attr);
3642 if (ret)
3643 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3644 __func__, ret);
3645
3646 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3647 msm_host->pm_qos_irq.status_attr.store = NULL;
3648 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3649 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3650 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3651 ret = device_create_file(&msm_host->pdev->dev,
3652 &msm_host->pm_qos_irq.status_attr);
3653 if (ret)
3654 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3655 __func__, ret);
3656}
3657
3658static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3659 struct device_attribute *attr, char *buf)
3660{
3661 struct sdhci_host *host = dev_get_drvdata(dev);
3662 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3663 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3664 struct sdhci_msm_pm_qos_group *group;
3665 int i;
3666 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3667 int offset = 0;
3668
3669 for (i = 0; i < nr_groups; i++) {
3670 group = &msm_host->pm_qos[i];
3671 offset += snprintf(&buf[offset], PAGE_SIZE,
3672 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3673 i, group->req.cpus_affine.bits[0],
3674 msm_host->pm_qos_group_enable,
3675 atomic_read(&group->counter),
3676 group->latency);
3677 }
3678
3679 return offset;
3680}
3681
3682static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3683 struct device_attribute *attr, char *buf)
3684{
3685 struct sdhci_host *host = dev_get_drvdata(dev);
3686 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3687 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3688
3689 return snprintf(buf, PAGE_SIZE, "%s\n",
3690 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3691}
3692
3693static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3694 struct device_attribute *attr, const char *buf, size_t count)
3695{
3696 struct sdhci_host *host = dev_get_drvdata(dev);
3697 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3698 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3699 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3700 uint32_t value;
3701 bool enable;
3702 int ret;
3703 int i;
3704
3705 ret = kstrtou32(buf, 0, &value);
3706 if (ret)
3707 goto out;
3708 enable = !!value;
3709
3710 if (enable == msm_host->pm_qos_group_enable)
3711 goto out;
3712
3713 msm_host->pm_qos_group_enable = enable;
3714 if (!enable) {
3715 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303716 cancel_delayed_work_sync(
3717 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003718 atomic_set(&msm_host->pm_qos[i].counter, 0);
3719 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3720 pm_qos_update_request(&msm_host->pm_qos[i].req,
3721 msm_host->pm_qos[i].latency);
3722 }
3723 }
3724
3725out:
3726 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003727}
3728
3729static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3730{
3731 int i;
3732 struct sdhci_msm_cpu_group_map *map =
3733 &msm_host->pdata->pm_qos_data.cpu_group_map;
3734
3735 if (cpu < 0)
3736 goto not_found;
3737
3738 for (i = 0; i < map->nr_groups; i++)
3739 if (cpumask_test_cpu(cpu, &map->mask[i]))
3740 return i;
3741
3742not_found:
3743 return -EINVAL;
3744}
3745
3746void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3747 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3748{
3749 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3750 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3751 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3752 struct sdhci_msm_pm_qos_group *pm_qos_group;
3753 int counter;
3754
3755 if (!msm_host->pm_qos_group_enable || group < 0)
3756 return;
3757
3758 pm_qos_group = &msm_host->pm_qos[group];
3759 counter = atomic_inc_return(&pm_qos_group->counter);
3760
3761 /* Make sure to update the voting in case power policy has changed */
3762 if (pm_qos_group->latency == latency->latency[host->power_policy]
3763 && counter > 1)
3764 return;
3765
Asutosh Das36c2e922015-12-01 12:19:58 +05303766 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003767
3768 pm_qos_group->latency = latency->latency[host->power_policy];
3769 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3770}
3771
3772static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3773{
3774 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303775 container_of(work, struct sdhci_msm_pm_qos_group,
3776 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003777
3778 if (atomic_read(&group->counter))
3779 return;
3780
3781 group->latency = PM_QOS_DEFAULT_VALUE;
3782 pm_qos_update_request(&group->req, group->latency);
3783}
3784
Gilad Broner07d92eb2015-09-29 16:57:21 +03003785bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003786{
3787 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3788 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3789 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3790
3791 if (!msm_host->pm_qos_group_enable || group < 0 ||
3792 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003793 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003794
3795 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303796 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3797 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003798 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003799 }
3800
3801 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3802 pm_qos_update_request(&msm_host->pm_qos[group].req,
3803 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003804 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003805}
3806
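/*
 * Allocate one PM QoS request per CPU group described in platform data
 * and create the sysfs knobs used to inspect and enable them.
 */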
3807void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3808 struct sdhci_msm_pm_qos_latency *latency)
3809{
3810 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3811 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3812 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3813 struct sdhci_msm_pm_qos_group *group;
3814 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003815 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003816
3817 if (msm_host->pm_qos_group_enable)
3818 return;
3819
3820 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3821 GFP_KERNEL);
3822 if (!msm_host->pm_qos)
3823 return;
3824
3825 for (i = 0; i < nr_groups; i++) {
3826 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303827 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003828 sdhci_msm_pm_qos_cpu_unvote_work);
3829 atomic_set(&group->counter, 0);
3830 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3831 cpumask_copy(&group->req.cpus_affine,
3832 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3833 /* For initialization phase, set the performance mode latency */
3834 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3835 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3836 group->latency);
3837 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3838 __func__, i,
3839 group->req.cpus_affine.bits[0],
3840 group->latency,
3841 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3842 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003843 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003844 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003845
3846 /* sysfs */
3847 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3848 msm_host->pm_qos_group_status_attr.store = NULL;
3849 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3850 msm_host->pm_qos_group_status_attr.attr.name =
3851 "pm_qos_cpu_groups_status";
3852 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3853 ret = device_create_file(&msm_host->pdev->dev,
3854 &msm_host->pm_qos_group_status_attr);
3855 if (ret)
3856 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3857 __func__, ret);
3858 msm_host->pm_qos_group_enable_attr.show =
3859 sdhci_msm_pm_qos_group_enable_show;
3860 msm_host->pm_qos_group_enable_attr.store =
3861 sdhci_msm_pm_qos_group_enable_store;
3862 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3863 msm_host->pm_qos_group_enable_attr.attr.name =
3864 "pm_qos_cpu_groups_enable";
3865 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3866 ret = device_create_file(&msm_host->pdev->dev,
3867 &msm_host->pm_qos_group_enable_attr);
3868 if (ret)
3869 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3870 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003871}
3872
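/*
 * Vote for IRQ and CPU-group PM QoS before issuing a request; if the
 * issuing CPU belongs to a different group than the previous request,
 * the old group's vote is dropped first.
 */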
Gilad Broner07d92eb2015-09-29 16:57:21 +03003873static void sdhci_msm_pre_req(struct sdhci_host *host,
3874 struct mmc_request *mmc_req)
3875{
3876 int cpu;
3877 int group;
3878 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3879 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3880 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3881 msm_host->pm_qos_prev_cpu);
3882
3883 sdhci_msm_pm_qos_irq_vote(host);
3884
3885 cpu = get_cpu();
3886 put_cpu();
3887 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3888 if (group < 0)
3889 return;
3890
3891 if (group != prev_group && prev_group >= 0) {
3892 sdhci_msm_pm_qos_cpu_unvote(host,
3893 msm_host->pm_qos_prev_cpu, false);
3894 prev_group = -1; /* make sure to vote for new group */
3895 }
3896
3897 if (prev_group < 0) {
3898 sdhci_msm_pm_qos_cpu_vote(host,
3899 msm_host->pdata->pm_qos_data.latency, cpu);
3900 msm_host->pm_qos_prev_cpu = cpu;
3901 }
3902}
3903
3904static void sdhci_msm_post_req(struct sdhci_host *host,
3905 struct mmc_request *mmc_req)
3906{
3907 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3908 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3909
3910 sdhci_msm_pm_qos_irq_unvote(host, false);
3911
3912 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3913 msm_host->pm_qos_prev_cpu = -1;
3914}
3915
3916static void sdhci_msm_init(struct sdhci_host *host)
3917{
3918 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3919 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3920
3921 sdhci_msm_pm_qos_irq_init(host);
3922
3923 if (msm_host->pdata->pm_qos_data.legacy_valid)
3924 sdhci_msm_pm_qos_cpu_init(host,
3925 msm_host->pdata->pm_qos_data.latency);
3926}
3927
Sahitya Tummala9150a942014-10-31 15:33:04 +05303928static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3929{
3930 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3931 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3932 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3933 u32 max_curr = 0;
3934
3935 if (curr_slot && curr_slot->vdd_data)
3936 max_curr = curr_slot->vdd_data->hpm_uA;
3937
3938 return max_curr;
3939}
3940
Asutosh Das0ef24812012-12-18 16:14:02 +05303941static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala14613432013-03-21 11:13:25 +05303942 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05303943 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003944 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05303945 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003946 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05303947 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303948 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303949 .get_min_clock = sdhci_msm_get_min_clock,
3950 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303951 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303952 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303953 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08003954 .set_bus_width = sdhci_set_bus_width,
Venkat Gopalakrishnan411df072015-01-09 11:09:44 -08003955 .reset = sdhci_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003956 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303957 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05303958 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03003959 .init = sdhci_msm_init,
3960 .pre_req = sdhci_msm_pre_req,
3961 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05303962 .get_current_limit = sdhci_msm_get_current_limit,
Asutosh Das0ef24812012-12-18 16:14:02 +05303963};
3964
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303965static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
3966 struct sdhci_host *host)
3967{
Krishna Konda46fd1432014-10-30 21:13:27 -07003968 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303969 u16 minor;
3970 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303971 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303972 const struct sdhci_msm_offset *msm_host_offset =
3973 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303974
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303975 version = sdhci_msm_readl_relaxed(host,
3976 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303977 major = (version & CORE_VERSION_MAJOR_MASK) >>
3978 CORE_VERSION_MAJOR_SHIFT;
3979 minor = version & CORE_VERSION_TARGET_MASK;
3980
Krishna Konda46fd1432014-10-30 21:13:27 -07003981 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
3982
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303983 /*
3984	 * Starting with the SDCC 5 controller (core major version = 1),
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003985	 * the controller won't advertise 3.0V, 1.8V and 8-bit features
3986 * except for some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303987 */
3988 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003989 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003990 /*
3991 * Enable 1.8V support capability on controllers that
3992 * support dual voltage
3993 */
3994 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07003995 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
3996 caps |= CORE_3_0V_SUPPORT;
3997 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003998 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05303999 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4000 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304001 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004002
4003 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304004 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
4005	 * on 8992 (minor 0x3e) as a workaround for the data stuck issue on reset.
4006 */
4007 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304008 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304009 val = readl_relaxed(host->ioaddr +
4010 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304011 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304012 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304013 }
4014 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004015 * SDCC 5 controller with major version 1, minor version 0x34 and later
4016 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
4017 */
4018 if ((major == 1) && (minor < 0x34))
4019 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004020
4021 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004022 * SDCC 5 controller with major version 1, minor version 0x42 and later
4023 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304024 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004025 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304026 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004027 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304028 msm_host->enhanced_strobe = true;
4029 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004030
4031 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004032 * SDCC 5 controller with major version 1 and minor version 0x42,
4033 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
4034	 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304035 * when MCLK is gated OFF, it is not gated for less than 0.5us
4036	 * and MCLK must be switched on for at least 1us before DATA
4037 * starts coming.
4038 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004039 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
4040 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304041 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004042
Pavan Anamula5a256df2015-10-16 14:38:28 +05304043	/* Fake 3.0V support for SDIO devices which require such voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304044 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304045 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304046 writel_relaxed((readl_relaxed(host->ioaddr +
4047 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4048 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304049 }
4050
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004051 if ((major == 1) && (minor >= 0x49))
4052 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304053 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004054 * Mask 64-bit support for controller with 32-bit address bus so that
4055	 * a smaller descriptor size will be used, improving memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004056 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004057 if (!msm_host->pdata->largeaddressbus)
4058 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4059
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304060 writel_relaxed(caps, host->ioaddr +
4061 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004062 /* keep track of the value in SDHCI_CAPABILITIES */
4063 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304064
4065 if ((major == 1) && (minor >= 0x6b))
4066 msm_host->ice_hci_support = true;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304067}
4068
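/*
 * Hook up the command queue (CMDQ) engine when CONFIG_MMC_CQ_HCI is
 * enabled; the host falls back to legacy mode if cmdq_pltfm_init()
 * fails or "nocmdq" was given on the command line.
 */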
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004069#ifdef CONFIG_MMC_CQ_HCI
4070static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4071 struct platform_device *pdev)
4072{
4073 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4074 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4075
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304076 if (nocmdq) {
4077 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4078 return;
4079 }
4080
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004081 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004082 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004083 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4084 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004085 host->cq_host = NULL;
4086 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004087 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004088 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004089}
4090#else
4091static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4092 struct platform_device *pdev)
4093{
4094
4095}
4096#endif
4097
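/*
 * Return true only if this device matches the "androidboot.bootdevice="
 * command line argument, or if no such argument was passed at all.
 */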
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004098static bool sdhci_msm_is_bootdevice(struct device *dev)
4099{
4100 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4101 strlen(saved_command_line))) {
4102 char search_string[50];
4103
4104 snprintf(search_string, ARRAY_SIZE(search_string),
4105 "androidboot.bootdevice=%s", dev_name(dev));
4106 if (strnstr(saved_command_line, search_string,
4107 strlen(saved_command_line)))
4108 return true;
4109 else
4110 return false;
4111 }
4112
4113 /*
4114	 * If the "androidboot.bootdevice=" argument is not present, then
4115	 * return true as we don't know the boot device anyway.
4116 */
4117 return true;
4118}
4119
Asutosh Das0ef24812012-12-18 16:14:02 +05304120static int sdhci_msm_probe(struct platform_device *pdev)
4121{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304122 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304123 struct sdhci_host *host;
4124 struct sdhci_pltfm_host *pltfm_host;
4125 struct sdhci_msm_host *msm_host;
4126 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004127 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004128 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004129 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304130 struct resource *tlmm_memres = NULL;
4131 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304132 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304133
4134 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4135 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4136 GFP_KERNEL);
4137 if (!msm_host) {
4138 ret = -ENOMEM;
4139 goto out;
4140 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304141
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304142 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4143 msm_host->mci_removed = true;
4144 msm_host->offset = &sdhci_msm_offset_mci_removed;
4145 } else {
4146 msm_host->mci_removed = false;
4147 msm_host->offset = &sdhci_msm_offset_mci_present;
4148 }
4149 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304150 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4151 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4152 if (IS_ERR(host)) {
4153 ret = PTR_ERR(host);
4154 goto out;
4155 }
4156
4157 pltfm_host = sdhci_priv(host);
4158 pltfm_host->priv = msm_host;
4159 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304160 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304161
4162 /* Extract platform data */
4163 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004164 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304165 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004166 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4167 ret);
4168 goto pltfm_free;
4169 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004170
4171 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004172 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4173 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004174 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004175 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004176
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004177 if (disable_slots & (1 << (ret - 1))) {
4178 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4179 ret);
4180 ret = -ENODEV;
4181 goto pltfm_free;
4182 }
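		/*
		 * Illustrative only: disable_slots is defined elsewhere in
		 * this driver as a per-slot bitmask. Assuming it is exposed
		 * as a module parameter, e.g. "sdhci_msm.disable_slots=2"
		 * would make slot 2 bail out above with -ENODEV.
		 */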
4183
Sayali Lokhande5f768322016-04-11 18:36:53 +05304184 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004185 sdhci_slot[ret-1] = msm_host;
4186
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004187 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4188 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304189 if (!msm_host->pdata) {
4190			dev_err(&pdev->dev, "DT parsing error\n");
			ret = -EINVAL;
4191			goto pltfm_free;
4192 }
4193 } else {
4194		dev_err(&pdev->dev, "No device tree node\n");
		ret = -ENODEV;
4195		goto pltfm_free;
4196 }
4197
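	/*
	 * Illustrative device-tree sketch only (node name, unit address and
	 * values are placeholders, not taken from a real board file); it
	 * simply mirrors the alias, resource, interrupt and clock names that
	 * this probe and sdhci_msm_populate_pdata() look up. The SDHCI
	 * register block itself is mapped by the sdhci-pltfm layer and is
	 * omitted here.
	 *
	 *	aliases {
	 *		sdhc1 = &sdhc_1;
	 *	};
	 *
	 *	sdhc_1: sdhci@7824900 {
	 *		compatible = "qcom,sdhci-msm";
	 *		reg-names = "core_mem", "tlmm_mem";
	 *		interrupt-names = "pwr_irq", "sdiowakeup_irq";
	 *		clock-names = "iface_clk", "core_clk", "bus_clk",
	 *			      "cal_clk", "sleep_clk";
	 *	};
	 */
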
4198 /* Setup Clocks */
4199
4200 /* Setup SDCC bus voter clock. */
4201 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4202 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4203 /* Vote for max. clk rate for max. performance */
4204 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4205 if (ret)
4206 goto pltfm_free;
4207 ret = clk_prepare_enable(msm_host->bus_clk);
4208 if (ret)
4209 goto pltfm_free;
4210 }
4211
4212 /* Setup main peripheral bus clock */
4213 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4214 if (!IS_ERR(msm_host->pclk)) {
4215 ret = clk_prepare_enable(msm_host->pclk);
4216 if (ret)
4217 goto bus_clk_disable;
4218 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304219 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304220
4221 /* Setup SDC MMC clock */
4222 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4223 if (IS_ERR(msm_host->clk)) {
4224 ret = PTR_ERR(msm_host->clk);
4225 goto pclk_disable;
4226 }
4227
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304228 /* Set to the minimum supported clock frequency */
4229 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4230 if (ret) {
4231 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304232 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304233 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304234 ret = clk_prepare_enable(msm_host->clk);
4235 if (ret)
4236 goto pclk_disable;
4237
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304238 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304239 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304240
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004241 /* Setup CDC calibration fixed feedback clock */
4242 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4243 if (!IS_ERR(msm_host->ff_clk)) {
4244 ret = clk_prepare_enable(msm_host->ff_clk);
4245 if (ret)
4246 goto clk_disable;
4247 }
4248
4249 /* Setup CDC calibration sleep clock */
4250 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4251 if (!IS_ERR(msm_host->sleep_clk)) {
4252 ret = clk_prepare_enable(msm_host->sleep_clk);
4253 if (ret)
4254 goto ff_clk_disable;
4255 }
4256
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004257 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4258
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304259 ret = sdhci_msm_bus_register(msm_host, pdev);
4260 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004261 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304262
4263 if (msm_host->msm_bus_vote.client_handle)
4264 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4265 sdhci_msm_bus_work);
4266 sdhci_msm_bus_voting(host, 1);
4267
Asutosh Das0ef24812012-12-18 16:14:02 +05304268 /* Setup regulators */
4269 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4270 if (ret) {
4271 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304272 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304273 }
4274
4275 /* Reset the core and Enable SDHC mode */
4276 core_memres = platform_get_resource_byname(pdev,
4277 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304278 if (!msm_host->mci_removed) {
4279 if (!core_memres) {
4280			dev_err(&pdev->dev, "Failed to get iomem resource\n");
			ret = -EINVAL;
4281			goto vreg_deinit;
4282 }
4283 msm_host->core_mem = devm_ioremap(&pdev->dev,
4284 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304285
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304286 if (!msm_host->core_mem) {
4287 dev_err(&pdev->dev, "Failed to remap registers\n");
4288 ret = -ENOMEM;
4289 goto vreg_deinit;
4290 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304291 }
4292
Sahitya Tummala079ed852015-10-29 20:18:45 +05304293 tlmm_memres = platform_get_resource_byname(pdev,
4294 IORESOURCE_MEM, "tlmm_mem");
4295 if (tlmm_memres) {
4296 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4297 resource_size(tlmm_memres));
4298
4299 if (!tlmm_mem) {
4300 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4301 ret = -ENOMEM;
4302 goto vreg_deinit;
4303 }
4304 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4305 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4306 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4307 }
4308
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304309 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004310	 * Reset the vendor spec register to the power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304311 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004312 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304313 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304314
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304315 if (!msm_host->mci_removed) {
4316 /* Set HC_MODE_EN bit in HC_MODE register */
4317 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304318
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304319 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4320 writel_relaxed(readl_relaxed(msm_host->core_mem +
4321 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4322 msm_host->core_mem + CORE_HC_MODE);
4323 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304324 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004325
4326 /*
4327	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4328 * be used as required later on.
4329 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304330 writel_relaxed((readl_relaxed(host->ioaddr +
4331 msm_host_offset->CORE_VENDOR_SPEC) |
4332 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4333 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304334 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304335 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4336 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4337 * interrupt in GIC (by registering the interrupt handler), we need to
4338	 * ensure that any pending power irq interrupt status is acknowledged;
4339	 * otherwise the power irq interrupt handler would fire prematurely.
4340 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304341 irq_status = sdhci_msm_readl_relaxed(host,
4342 msm_host_offset->CORE_PWRCTL_STATUS);
4343 sdhci_msm_writel_relaxed(irq_status, host,
4344 msm_host_offset->CORE_PWRCTL_CLEAR);
4345 irq_ctl = sdhci_msm_readl_relaxed(host,
4346 msm_host_offset->CORE_PWRCTL_CTL);
4347
Subhash Jadavani28137342013-05-14 17:46:43 +05304348 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4349 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4350 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4351 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304352 sdhci_msm_writel_relaxed(irq_ctl, host,
4353 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004354
Subhash Jadavani28137342013-05-14 17:46:43 +05304355 /*
4356	 * Ensure that the above writes are propagated before interrupt enablement
4357 * in GIC.
4358 */
4359 mb();
4360
4361 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304362 * Following are the deviations from SDHC spec v3.0 -
4363 * 1. Card detection is handled using separate GPIO.
4364 * 2. Bus power control is handled by interacting with PMIC.
4365 */
4366 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4367 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304368 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004369 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304370 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304371 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304372 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304373 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304374 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304375 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304376
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304377 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4378 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4379
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004380 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004381 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4382 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4383 SDHCI_VENDOR_VER_SHIFT));
4384 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4385 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4386 /*
4387 * Add 40us delay in interrupt handler when
4388		 * operating at the initialization frequency (400 kHz).
4389 */
4390 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4391 /*
4392 * Set Software Reset for DAT line in Software
4393 * Reset Register (Bit 2).
4394 */
4395 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4396 }
4397
Asutosh Das214b9662013-06-13 14:27:42 +05304398 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4399
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004400 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004401 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4402 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304403 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004404 msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
Asutosh Das0ef24812012-12-18 16:14:02 +05304405		goto vreg_deinit;
4406 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004407 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304408 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004409 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304410 if (ret) {
4411 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004412 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304413 goto vreg_deinit;
4414 }
4415
4416 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304417 sdhci_msm_writel_relaxed(INT_MASK, host,
4418 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304419
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304420#ifdef CONFIG_MMC_CLKGATE
4421 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4422 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4423#endif
4424
Asutosh Das0ef24812012-12-18 16:14:02 +05304425 /* Set host capabilities */
4426 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4427 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004428 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304429 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304430 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004431 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004432 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004433 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304434 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004435 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004436 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304437 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304438
4439 if (msm_host->pdata->nonremovable)
4440 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4441
Guoping Yuf7c91332014-08-20 16:56:18 +08004442 if (msm_host->pdata->nonhotplug)
4443 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4444
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004445 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4446
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304447 init_completion(&msm_host->pwr_irq_completion);
4448
Sahitya Tummala581df132013-03-12 14:57:46 +05304449 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304450 /*
4451 * Set up the card detect GPIO in active configuration before
4452 * configuring it as an IRQ. Otherwise, it can be in some
4453		 * weird/inconsistent state, resulting in a flood of interrupts.
4454 */
4455 sdhci_msm_setup_pins(msm_host->pdata, true);
4456
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304457 /*
4458 * This delay is needed for stabilizing the card detect GPIO
4459 * line after changing the pull configs.
4460 */
4461 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304462 ret = mmc_gpio_request_cd(msm_host->mmc,
4463 msm_host->pdata->status_gpio, 0);
4464 if (ret) {
4465 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4466 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304467 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304468 }
4469 }
4470
Krishna Konda7feab352013-09-17 23:55:40 -07004471 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4472 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4473 host->dma_mask = DMA_BIT_MASK(64);
4474 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304475 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004476 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304477 host->dma_mask = DMA_BIT_MASK(32);
4478 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304479 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304480 } else {
4481 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4482 }
4483
Ritesh Harjani42876f42015-11-17 17:46:51 +05304484 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4485 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304486 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304487 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4488 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304489 msm_host->is_sdiowakeup_enabled = true;
4490 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4491 sdhci_msm_sdiowakeup_irq,
4492 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4493 "sdhci-msm sdiowakeup", host);
4494 if (ret) {
4495 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4496 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4497 msm_host->pdata->sdiowakeup_irq = -1;
4498 msm_host->is_sdiowakeup_enabled = false;
4499 goto vreg_deinit;
4500 } else {
4501 spin_lock_irqsave(&host->lock, flags);
4502 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304503 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304504 spin_unlock_irqrestore(&host->lock, flags);
4505 }
4506 }
4507
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004508 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304509 ret = sdhci_add_host(host);
4510 if (ret) {
4511 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304512 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304513 }
4514
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004515 pm_runtime_set_active(&pdev->dev);
4516 pm_runtime_enable(&pdev->dev);
4517 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4518 pm_runtime_use_autosuspend(&pdev->dev);
4519
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304520 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4521 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4522 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4523 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4524 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4525 ret = device_create_file(&pdev->dev,
4526 &msm_host->msm_bus_vote.max_bus_bw);
4527 if (ret)
4528 goto remove_host;
4529
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304530 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4531 msm_host->polling.show = show_polling;
4532 msm_host->polling.store = store_polling;
4533 sysfs_attr_init(&msm_host->polling.attr);
4534 msm_host->polling.attr.name = "polling";
4535 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4536 ret = device_create_file(&pdev->dev, &msm_host->polling);
4537 if (ret)
4538 goto remove_max_bus_bw_file;
4539 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304540
4541 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4542 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4543 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4544 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4545 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4546 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4547 if (ret) {
4548 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4549 mmc_hostname(host->mmc), __func__, ret);
4550 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4551 }
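
	/*
	 * Sketch of the sysfs knobs created above (paths and values are
	 * placeholders; the exact device path depends on the platform):
	 *
	 *	echo <value> > /sys/devices/.../max_bus_bw
	 *	echo <0|1>   > /sys/devices/.../polling	  (only without a CD GPIO)
	 *	echo <0|1>   > /sys/devices/.../enable_auto_cmd21
	 */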
Asutosh Das0ef24812012-12-18 16:14:02 +05304552 /* Successful initialization */
4553 goto out;
4554
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304555remove_max_bus_bw_file:
4556 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304557remove_host:
4558 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004559 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304560 sdhci_remove_host(host, dead);
4561vreg_deinit:
4562 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304563bus_unregister:
4564 if (msm_host->msm_bus_vote.client_handle)
4565 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4566 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004567sleep_clk_disable:
4568 if (!IS_ERR(msm_host->sleep_clk))
4569 clk_disable_unprepare(msm_host->sleep_clk);
4570ff_clk_disable:
4571 if (!IS_ERR(msm_host->ff_clk))
4572 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304573clk_disable:
4574 if (!IS_ERR(msm_host->clk))
4575 clk_disable_unprepare(msm_host->clk);
4576pclk_disable:
4577 if (!IS_ERR(msm_host->pclk))
4578 clk_disable_unprepare(msm_host->pclk);
4579bus_clk_disable:
4580 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4581 clk_disable_unprepare(msm_host->bus_clk);
4582pltfm_free:
4583 sdhci_pltfm_free(pdev);
4584out:
4585 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4586 return ret;
4587}
4588
4589static int sdhci_msm_remove(struct platform_device *pdev)
4590{
4591 struct sdhci_host *host = platform_get_drvdata(pdev);
4592 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4593 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4594 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4595 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4596 0xffffffff);
4597
4598 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304599 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4600 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304601 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004602 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304603 sdhci_remove_host(host, dead);
4604 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304605
Asutosh Das0ef24812012-12-18 16:14:02 +05304606 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304607
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304608 sdhci_msm_setup_pins(pdata, true);
4609 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304610
4611 if (msm_host->msm_bus_vote.client_handle) {
4612 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4613 sdhci_msm_bus_unregister(msm_host);
4614 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304615 return 0;
4616}
4617
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004618#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304619static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4620{
4621 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4622 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4623 unsigned long flags;
4624 int ret = 0;
4625
4626 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4627 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4628 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304629 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304630 return 1;
4631 }
4632
4633 spin_lock_irqsave(&host->lock, flags);
4634 if (enable) {
4635 /* configure DAT1 gpio if applicable */
4636 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304637 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304638 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4639 if (!ret)
4640 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4641 goto out;
4642 } else {
4643 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4644 mmc_hostname(host->mmc), enable);
4645 }
4646 } else {
4647 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4648 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4649 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304650 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304651 } else {
4652			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4653 mmc_hostname(host->mmc), enable);
4654
4655 }
4656 }
4657out:
4658 if (ret)
4659 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4660 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4661 ret, msm_host->pdata->sdiowakeup_irq);
4662 spin_unlock_irqrestore(&host->lock, flags);
4663 return ret;
4664}
4665
4666
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004667static int sdhci_msm_runtime_suspend(struct device *dev)
4668{
4669 struct sdhci_host *host = dev_get_drvdata(dev);
4670 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4671 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004672 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004673
Ritesh Harjani42876f42015-11-17 17:46:51 +05304674 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4675 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304676
Ritesh Harjani42876f42015-11-17 17:46:51 +05304677 sdhci_cfg_irq(host, false, true);
4678
4679defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004680 disable_irq(msm_host->pwr_irq);
4681
4682 /*
4683	 * Remove the vote immediately only if the clocks are off; in that
4684	 * case, work to remove the vote may have been queued but may not
4685	 * complete before runtime suspend or system suspend.
4686 */
4687 if (!atomic_read(&msm_host->clks_on)) {
4688 if (msm_host->msm_bus_vote.client_handle)
4689 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4690 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004691 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4692 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004693
4694 return 0;
4695}
4696
4697static int sdhci_msm_runtime_resume(struct device *dev)
4698{
4699 struct sdhci_host *host = dev_get_drvdata(dev);
4700 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4701 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004702 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004703
Ritesh Harjani42876f42015-11-17 17:46:51 +05304704 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4705 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304706
Ritesh Harjani42876f42015-11-17 17:46:51 +05304707 sdhci_cfg_irq(host, true, true);
4708
4709defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004710 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004711
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004712 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4713 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004714 return 0;
4715}
4716
4717static int sdhci_msm_suspend(struct device *dev)
4718{
4719 struct sdhci_host *host = dev_get_drvdata(dev);
4720 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4721 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004722 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304723 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004724 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004725
4726 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4727 (msm_host->mmc->slot.cd_irq >= 0))
4728 disable_irq(msm_host->mmc->slot.cd_irq);
4729
4730 if (pm_runtime_suspended(dev)) {
4731 pr_debug("%s: %s: already runtime suspended\n",
4732 mmc_hostname(host->mmc), __func__);
4733 goto out;
4734 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004735 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004736out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304737 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304738 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4739 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4740 if (sdio_cfg)
4741 sdhci_cfg_irq(host, false, true);
4742 }
4743
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004744 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4745 ktime_to_us(ktime_sub(ktime_get(), start)));
4746 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004747}
4748
4749static int sdhci_msm_resume(struct device *dev)
4750{
4751 struct sdhci_host *host = dev_get_drvdata(dev);
4752 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4753 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4754 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304755 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004756 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004757
4758 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4759 (msm_host->mmc->slot.cd_irq >= 0))
4760 enable_irq(msm_host->mmc->slot.cd_irq);
4761
4762 if (pm_runtime_suspended(dev)) {
4763 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4764 mmc_hostname(host->mmc), __func__);
4765 goto out;
4766 }
4767
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004768 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004769out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304770 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4771 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4772 if (sdio_cfg)
4773 sdhci_cfg_irq(host, true, true);
4774 }
4775
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004776 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4777 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004778 return ret;
4779}
4780
Ritesh Harjani42876f42015-11-17 17:46:51 +05304781static int sdhci_msm_suspend_noirq(struct device *dev)
4782{
4783 struct sdhci_host *host = dev_get_drvdata(dev);
4784 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4785 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4786 int ret = 0;
4787
4788 /*
4789	 * ksdioirqd may still be running, hence retry the
4790	 * suspend later in case the clocks are ON.
4791 */
4792 if (atomic_read(&msm_host->clks_on)) {
4793 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4794 mmc_hostname(host->mmc), __func__);
4795 ret = -EAGAIN;
4796 }
4797
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304798 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4799 if (msm_host->sdio_pending_processing)
4800 ret = -EBUSY;
4801
Ritesh Harjani42876f42015-11-17 17:46:51 +05304802 return ret;
4803}
4804
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004805static const struct dev_pm_ops sdhci_msm_pmops = {
4806 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4807 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4808 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05304809 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004810};
4811
4812#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4813
4814#else
4815#define SDHCI_MSM_PMOPS NULL
4816#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304817static const struct of_device_id sdhci_msm_dt_match[] = {
4818 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304819 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004820 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304821};
4822MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4823
4824static struct platform_driver sdhci_msm_driver = {
4825 .probe = sdhci_msm_probe,
4826 .remove = sdhci_msm_remove,
4827 .driver = {
4828 .name = "sdhci_msm",
4829 .owner = THIS_MODULE,
4830 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004831 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05304832 },
4833};
4834
4835module_platform_driver(sdhci_msm_driver);
4836
4837MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
4838MODULE_LICENSE("GPL v2");